diff --git a/.gitmodules b/.gitmodules
index 5520c9b9188f0484ddc34eef630a86bca933a7a3..8179b9caa5701f7a94ed6c1dbda3ad75db1b866e 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -6,13 +6,13 @@
 	url = https://github.com/taosdata/hivemq-tdengine-extension.git
 [submodule "deps/jemalloc"]
 	path = deps/jemalloc
-	url = https://github.com/jemalloc/jemalloc
+	url = https://github.com/jemalloc/jemalloc.git
 [submodule "src/kit/taos-tools"]
 	path = src/kit/taos-tools
-	url = https://github.com/taosdata/taos-tools
+	url = https://github.com/taosdata/taos-tools.git
 [submodule "src/plugins/taosadapter"]
 	path = src/plugins/taosadapter
-	url = https://github.com/taosdata/taosadapter
+	url = https://github.com/taosdata/taosadapter.git
 [submodule "examples/rust"]
 	path = examples/rust
 	url = https://github.com/songtianyi/tdengine-rust-bindings.git
diff --git a/.mailmap b/.mailmap
new file mode 100644
index 0000000000000000000000000000000000000000..9e5fb9468dbc65cd2bacb51b78a055998fad5bf6
--- /dev/null
+++ b/.mailmap
@@ -0,0 +1,16 @@
+#
+# This list is used by git-shortlog to fix a few botched name translations
+# in the git archive, either because the author's full name was messed up
+# and/or not always written the same way, making contributions from the
+# same person appearing not to be so or badly displayed. Also allows for
+# old email addresses to map to new email addresses.
+#
+# For format details, see "MAPPING AUTHORS" in "man git-shortlog".
+#
+# Please keep this list dictionary sorted.
+#
+
+Jeff Tao
+Wade Zhang
+Shuduo Sang
+Pan Yang
\ No newline at end of file
diff --git a/README.md b/README.md
index 6e6b13821892839216fad1e3e5839fa623eb59e1..aa944ef75dab53bcfec068a59285b561d064b429 100644
--- a/README.md
+++ b/README.md
@@ -11,7 +11,7 @@ We are hiring, check [here](https://www.taosdata.com/en/careers/)
 
 # What is TDengine?
 
-TDengine is a high-performance, scalable time-series database with SQL support. Its code including cluster feature is open source under [GNU AGPL v3.0](http://www.gnu.org/licenses/agpl-3.0.html). Besides the database, it provides caching, stream processing, data data subscription and other functionalities to reduce the complexity and cost of development and operation. TDengine differentiates itself from other TSDBs with the following advantages.
+TDengine is a high-performance, scalable time-series database with SQL support. Its code including cluster feature is open source under [GNU AGPL v3.0](http://www.gnu.org/licenses/agpl-3.0.html). Besides the database, it provides caching, stream processing, data subscription and other functionalities to reduce the complexity and cost of development and operation. TDengine differentiates itself from other TSDBs with the following advantages.
 
 - **High Performance**: TDengine outperforms other time series databases in data ingestion and querying while significantly reducing storage cost and compute costs, with an innovatively designed and purpose-built storage engine.
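As committed, the `.mailmap` entries carry bare names and no addresses, which only lets git-shortlog canonicalize different spellings of the same name. Per "MAPPING AUTHORS" in git-shortlog(1), entries can also map old addresses to a canonical identity; for reference, a sketch of the documented entry forms with made-up addresses:

```
Proper Name <commit@email.xx>
<proper@email.xx> <commit@email.xx>
Proper Name <proper@email.xx> <commit@email.xx>
Proper Name <proper@email.xx> Commit Name <commit@email.xx>
```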
diff --git a/cmake/define.inc b/cmake/define.inc
index c1169b994804666c553b89300d140325341a14f0..8cb145e92dddda180706cd3d4494aded170cbb57 100755
--- a/cmake/define.inc
+++ b/cmake/define.inc
@@ -242,8 +242,14 @@ IF (TD_WINDOWS)
   ADD_DEFINITIONS(-DPTW32_BUILD)
   ADD_DEFINITIONS(-D_MBCS -D_CRT_SECURE_NO_DEPRECATE -D_CRT_NONSTDC_NO_DEPRECATE)
   SET(CMAKE_GENERATOR "NMake Makefiles" CACHE INTERNAL "" FORCE)
+  IF (CMAKE_DEPFILE_FLAGS_C)
+    SET(CMAKE_DEPFILE_FLAGS_C "")
+  ENDIF ()
+  IF (CMAKE_DEPFILE_FLAGS_CXX)
+    SET(CMAKE_DEPFILE_FLAGS_CXX "")
+  ENDIF ()
   IF (NOT TD_GODLL)
-    SET(COMMON_FLAGS "/nologo /WX /wd4018 /wd4999 /Oi /Oy- /Gm- /EHsc /MT /GS /Gy /fp:precise /Zc:wchar_t /Zc:forScope /Gd /errorReport:prompt /analyze-")
+    SET(COMMON_FLAGS "/nologo /wd4018 /wd4999 /Oi /Oy- /Gm- /EHsc /MT /GS /Gy /fp:precise /Zc:wchar_t /Zc:forScope /Gd /errorReport:prompt /analyze-")
     IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
       SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
     ENDIF ()
diff --git a/cmake/version.inc b/cmake/version.inc
index ae16262748653f7955a54cec0474f55611a7fd6d..dbd2277f9513d1698803e7316a51af588c3efda8 100755
--- a/cmake/version.inc
+++ b/cmake/version.inc
@@ -4,7 +4,7 @@ PROJECT(TDengine)
 IF (DEFINED VERNUMBER)
   SET(TD_VER_NUMBER ${VERNUMBER})
 ELSE ()
-  SET(TD_VER_NUMBER "2.4.0.0")
+  SET(TD_VER_NUMBER "2.7.0.0")
 ENDIF ()
 
 IF (DEFINED VERCOMPATIBLE)
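Because `version.inc` only falls back to the hard-coded string when `VERNUMBER` is undefined, the packaged version can be overridden at configure time; a minimal sketch (build directory and version value are arbitrary):

```bash
# -DVERNUMBER takes the IF (DEFINED VERNUMBER) branch above,
# so TD_VER_NUMBER becomes 2.7.0.1 instead of the default 2.7.0.0
cmake .. -DVERNUMBER=2.7.0.1
```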
diff --git a/docs-cn/01-index.md b/docs-cn/01-index.md
new file mode 100644
index 0000000000000000000000000000000000000000..d2e6706892f3997af115e71d1da455ebce2ecbec
--- /dev/null
+++ b/docs-cn/01-index.md
@@ -0,0 +1,25 @@
+---
+title: TDengine 文档
+sidebar_label: 文档首页
+slug: /
+---
+
+TDengine 是一款[高性能](https://www.taosdata.com/fast)、[分布式](https://www.taosdata.com/scalable)、[支持 SQL](https://www.taosdata.com/sql-support) 的时序数据库 (Database)。本文档是 TDengine 用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发者与系统管理员的。
+
+TDengine 充分利用了时序数据的特点,提出了“一个数据采集点一张表”与“超级表”的概念,设计了创新的存储引擎,让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用TDengine, 无论如何,请您仔细阅读[基本概念](./concept)一章。
+
+如果你是开发者,请一定仔细阅读[开发指南](./develop)一章,该部分对数据库连接、建模、插入数据、查询、连续查询、缓存、数据订阅、用户自定义函数等功能都做了详细介绍,并配有各种编程语言的示例代码。大部分情况下,你只要把示例代码拷贝粘贴,针对自己的应用稍作改动,就能跑起来。
+
+我们已经生活在大数据的时代,纵向扩展已经无法满足日益增长的业务需求,任何系统都必须具有水平扩展的能力,集群成为大数据以及 database 系统的不可缺失功能。TDengine 团队不仅实现了集群功能,而且将这一重要核心功能开源。怎么部署、管理和维护 TDengine 集群,请参考[集群管理](./cluster)一章。
+
+TDengine 采用 SQL 作为其查询语言,大大降低学习成本、降低迁移成本,但同时针对时序数据场景,又做了一些扩展,以支持插值、降采样、时间加权平均等操作。[SQL 手册](./taos-sql)一章详细描述了 SQL 语法、详细列出了各种支持的命令和函数。
+
+如果你是系统管理员,关心安装、升级、容错灾备、关心数据导入、导出,配置参数,怎么监测 TDengine 是否健康运行,怎么提升系统运行的性能,那么请仔细参考[运维指南](./operation)一章。
+
+如果你对 TDengine 外围工具,REST API, 各种编程语言的连接器想做更多详细了解,请看[参考指南](./reference)一章。
+
+如果你对 TDengine 内部的架构设计很有兴趣,欢迎仔细阅读[技术内幕](./tdinternal)一章,里面对集群的设计、数据分区、分片、写入、读出、查询、聚合查询的流程都做了详细的介绍。如果你想研读 TDengine 代码甚至贡献代码,请一定仔细读完这一章。
+
+最后,作为一个开源软件,欢迎大家的参与。如果发现文档的任何错误,描述不清晰的地方,都请在每个页面的最下方,点击“编辑本文档”直接进行修改。
+
+Together, we make a difference!
diff --git a/docs-cn/01-intro/_category_.yml b/docs-cn/01-intro/_category_.yml
deleted file mode 100644
index 1fdaed6b943d4a0877325bf00005307fb2f19880..0000000000000000000000000000000000000000
--- a/docs-cn/01-intro/_category_.yml
+++ /dev/null
@@ -1 +0,0 @@
-label: TDengine 介绍
diff --git a/docs-cn/01-intro/eco_system.png b/docs-cn/01-intro/eco_system.png
deleted file mode 100644
index bf8bf8f1e0a2311fc12202d712a8a2f9b8ce419b..0000000000000000000000000000000000000000
Binary files a/docs-cn/01-intro/eco_system.png and /dev/null differ
diff --git a/docs-cn/01-intro/01-intro.md b/docs-cn/02-intro.md
similarity index 91%
rename from docs-cn/01-intro/01-intro.md
rename to docs-cn/02-intro.md
index 7dd40eeff043c1d364f085000ae5a2ebcf196389..673c2e96b65814fc1cd572d54f948793ed6fa521 100644
--- a/docs-cn/01-intro/01-intro.md
+++ b/docs-cn/02-intro.md
@@ -1,10 +1,9 @@
 ---
-sidebar_label: 产品简介
 title: 产品简介
 toc_max_heading_level: 2
 ---
 
-TDengine 是一款高性能、分布式、支持 SQL 的时序数据库,其核心代码,包括集群功能全部开源(开源协议,AGPL v3.0)。TDengine 能被广泛运用于物联网、工业互联网、车联网、IT 运维、金融等领域。除核心的时序数据库功能外,TDengine 还提供[缓存](/develop/cache/)、[数据订阅](/develop/subscribe)、[流式计算](/develop/continuous-query)等大数据平台所需要的系列功能,最大程度减少研发和运维的复杂度。
+TDengine 是一款高性能、分布式、支持 SQL 的时序数据库 (Database),其核心代码,包括集群功能全部开源(开源协议,AGPL v3.0)。TDengine 能被广泛运用于物联网、工业互联网、车联网、IT 运维、金融等领域。除核心的时序数据库 (Database) 功能外,TDengine 还提供[缓存](/develop/cache/)、[数据订阅](/develop/subscribe)、[流式计算](/develop/continuous-query)等大数据平台所需要的系列功能,最大程度减少研发和运维的复杂度。
 
 本章节介绍TDengine的主要功能、竞争优势、适用场景、与其他数据库的对比测试等等,让大家对TDengine有个整体的了解。
@@ -63,16 +62,16 @@ TDengine的主要功能如下:
 
-![TDengine技术生态图](eco_system.png)
+![TDengine Database 技术生态图](eco_system.webp)
 
 图 1. TDengine技术生态图
 
 上图中,左侧是各种数据采集或消息队列,包括 OPC-UA、MQTT、Telegraf、也包括 Kafka, 他们的数据将被源源不断的写入到 TDengine。右侧则是可视化、BI 工具、组态软件、应用程序。下侧则是 TDengine 自身提供的命令行程序 (CLI) 以及可视化管理管理。
 
-## TDengine 总体适用场景
+## 总体适用场景
 
-作为一个高性能、分布式、支持 SQL 的时序数据库,TDengine 的典型适用场景包括但不限于 IoT、工业互联网、车联网、IT 运维、能源、金融证券等领域。需要指出的是,TDengine 是针对时序数据场景设计的专用数据库和专用大数据处理工具,因充分利用了时序大数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM 等通用型数据。本文对适用场景做更多详细的分析。
+作为一个高性能、分布式、支持 SQL 的时序数据库 (Database),TDengine 的典型适用场景包括但不限于 IoT、工业互联网、车联网、IT 运维、能源、金融证券等领域。需要指出的是,TDengine 是针对时序数据场景设计的专用数据库和专用大数据处理工具,因充分利用了时序大数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM 等通用型数据。本文对适用场景做更多详细的分析。
 
 ### 数据源特点和需求
@@ -120,7 +119,6 @@ TDengine的主要功能如下:
 - [用 InfluxDB 开源的性能测试工具对比 InfluxDB 和 TDengine](https://www.taosdata.com/blog/2020/01/13/1105.html)
 - [TDengine 与 OpenTSDB 对比测试](https://www.taosdata.com/blog/2019/08/21/621.html)
 - [TDengine 与 Cassandra 对比测试](https://www.taosdata.com/blog/2019/08/14/573.html)
-- [TDengine 与 InfluxDB 对比测试](https://www.taosdata.com/blog/2019/07/19/419.html)
 - [TDengine VS InfluxDB ,写入性能大 PK !](https://www.taosdata.com/2021/11/05/3248.html)
 - [TDengine 和 InfluxDB 查询性能对比测试报告](https://www.taosdata.com/2022/02/22/5969.html)
 - [TDengine 与 InfluxDB、OpenTSDB、Cassandra、MySQL、ClickHouse 等数据库的对比测试报告](https://www.taosdata.com/downloads/TDengine_Testing_Report_cn.pdf)
diff --git a/docs-cn/02-concept/_category_.yml b/docs-cn/04-concept/_category_.yml
similarity index 100%
rename from docs-cn/02-concept/_category_.yml
rename to docs-cn/04-concept/_category_.yml
diff --git a/docs-cn/02-concept/02-concept.md b/docs-cn/04-concept/index.md
similarity index 97%
rename from docs-cn/02-concept/02-concept.md
rename to docs-cn/04-concept/index.md
index ca25595260953f8d941ccaf367bdc45a8325488f..8e97d4a2f43537c1229c8e8ea092ddfc1257dde7 100644
--- a/docs-cn/02-concept/02-concept.md
+++ b/docs-cn/04-concept/index.md
@@ -29,7 +29,7 @@ title: 数据模型和基本概念
 10.3
 219
 0.31
-Beijing.Chaoyang
+California.SanFrancisco
 2
@@ -38,7 +38,7 @@ title: 数据模型和基本概念
 10.2
 220
 0.23
-Beijing.Chaoyang
+California.SanFrancisco
 3
@@ -47,7 +47,7 @@ title: 数据模型和基本概念
 11.5
 221
 0.35
-Beijing.Haidian
+California.LosAngeles
 3
@@ -56,7 +56,7 @@ title: 数据模型和基本概念
 13.4
 223
 0.29
-Beijing.Haidian
+California.LosAngeles
 2
@@ -65,7 +65,7 @@ title: 数据模型和基本概念
 12.6
 218
 0.33
-Beijing.Chaoyang
+California.SanFrancisco
 2
@@ -74,7 +74,7 @@ title: 数据模型和基本概念
 11.8
 221
 0.28
-Beijing.Haidian
+California.LosAngeles
 2
@@ -83,7 +83,7 @@ title: 数据模型和基本概念
 10.3
 218
 0.25
-Beijing.Chaoyang
+California.SanFrancisco
 3
@@ -92,7 +92,7 @@ title: 数据模型和基本概念
 12.3
 221
 0.31
-Beijing.Chaoyang
+California.SanFrancisco
 2
diff --git a/docs-cn/04-develop/01-connect/_connect_python.mdx b/docs-cn/04-develop/01-connect/_connect_python.mdx
deleted file mode 100644
index 43e13ab56eeada9d86eae057a1ab0eafcd5247de..0000000000000000000000000000000000000000
--- a/docs-cn/04-develop/01-connect/_connect_python.mdx
+++ /dev/null
@@ -1,3 +0,0 @@
-```python title="原生连接"
-{{#include docs-examples/python/connect_exmaple.py}}
-```
diff --git a/docs-cn/03-get-started/_apt_get_install.mdx b/docs-cn/05-get-started/_apt_get_install.mdx
similarity index 100%
rename from docs-cn/03-get-started/_apt_get_install.mdx
rename to docs-cn/05-get-started/_apt_get_install.mdx
diff --git a/docs-cn/03-get-started/_category_.yml b/docs-cn/05-get-started/_category_.yml
similarity index 100%
rename from docs-cn/03-get-started/_category_.yml
rename to docs-cn/05-get-started/_category_.yml
diff --git a/docs-cn/03-get-started/_pkg_install.mdx b/docs-cn/05-get-started/_pkg_install.mdx
similarity index 100%
rename from docs-cn/03-get-started/_pkg_install.mdx
rename to docs-cn/05-get-started/_pkg_install.mdx
diff --git a/docs-cn/03-get-started/03-get-started.md b/docs-cn/05-get-started/index.md
similarity index 87%
rename from docs-cn/03-get-started/03-get-started.md
rename to docs-cn/05-get-started/index.md
index 4d535059be13476efd377b37bec98d9690a178de..878d7f020245fbff383308c281fbc3fa28ba5f6c 100644
--- a/docs-cn/03-get-started/03-get-started.md
+++ b/docs-cn/05-get-started/index.md
@@ -10,7 +10,7 @@ import AptGetInstall from "./\_apt_get_install.mdx";
 
 ## 安装
 
-TDengine 完整的软件包包括服务端(taosd)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、应用驱动(taosc)、命令行程序 (CLI,taos) 和一些工具软件,目前 2.X 版服务端 taosd 和 taosAdapter 仅在 Linux 系统上安装和运行,后续将支持 Windows、macOS 等系统。应用驱动 taosc 与 TDengine CLI 可以在 Windows 或 Linux 上安装和运行。TDengine 除 [RESTful 接口](/reference/taosadapter)外,还提供一些列编程语言的连接器。2.4 之前的版本中,无 taosAdapter,RESTful 接口均由 taosd 内置的 HTTP 服务提供。
+TDengine 完整的软件包包括服务端(taosd)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、应用驱动(taosc)、命令行程序 (CLI,taos) 和一些工具软件,目前 2.X 版服务端 taosd 和 taosAdapter 仅在 Linux 系统上安装和运行,后续将支持 Windows、macOS 等系统。应用驱动 taosc 与 TDengine CLI 可以在 Windows 或 Linux 上安装和运行。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](/reference/taosadapter) 提供 [RESTful 接口](/reference/rest-api)。但在 2.4 之前的版本中没有 taosAdapter,RESTful 接口是由 taosd 内置的 HTTP 服务提供的。
 
 TDengine 支持 X64/ARM64/MIPS64/Alpha64 硬件平台,后续将支持 ARM32、RISC-V 等 CPU 架构。
@@ -94,7 +94,7 @@ which systemctl
 
 ## TDengine 命令行 (CLI)
 
-为便于检查 TDengine 的状态,执行各种即席(Ad Hoc)查询,TDengine 提供一命令行应用程序(以下简称为 TDengine CLI) taos。要进入 TDengine 命令行,您只要在安装有 TDengine 的 Linux 终端执行 `taos` 即可。
+为便于检查 TDengine 的状态,执行数据库 (Database) 的各种即席(Ad Hoc)查询,TDengine 提供一命令行应用程序(以下简称为 TDengine CLI) taos。要进入 TDengine 命令行,您只要在安装有 TDengine 的 Linux 终端执行 `taos` 即可。
 
 ```bash
 taos
 ```
@@ -106,7 +106,7 @@ taos
 taos>
 ```
 
-在 TDengine CLI 中,用户可以通过 SQL 命令来创建/删除数据库、表等,并进行插入查询操作。在终端中运行的 SQL 语句需要以分号结束来运行。示例:
+在 TDengine CLI 中,用户可以通过 SQL 命令来创建/删除数据库、表等,并进行数据库(database)插入查询操作。在终端中运行的 SQL 语句需要以分号结束来运行。示例:
 
 ```sql
 create database demo;
@@ -132,7 +132,7 @@ Query OK, 2 row(s) in set (0.003128s)
 taosBenchmark
 ```
 
-该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "beijing" 或者 "shanghai"。
+该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "California.SanFrancisco" 或者 "California.LosAngeles"。
 
 这条命令很快完成 1 亿条记录的插入。具体时间取决于硬件性能,即使在一台普通的 PC 服务器往往也仅需十几秒。
@@ -154,10 +154,10 @@ taos> select count(*) from test.meters;
 
 taos> select avg(current), max(voltage), min(phase) from test.meters;
 ```
 
-查询 location="beijing" 的记录总条数:
+查询 location="California.SanFrancisco" 的记录总条数:
 
 ```sql
-taos> select count(*) from test.meters where location="beijing";
+taos> select count(*) from test.meters where location="California.SanFrancisco";
 ```
 
 查询 groupId=10 的所有记录的平均值、最大值、最小值等:
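The hunk above ends at the prompt introducing the groupId query without showing the statement itself; following the pattern of the preceding examples in the same file, it would presumably be:

```sql
taos> select avg(current), max(voltage), min(phase) from test.meters where groupId = 10;
```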
diff --git a/docs-cn/04-develop/01-connect/_category_.yml b/docs-cn/07-develop/01-connect/_category_.yml
similarity index 100%
rename from docs-cn/04-develop/01-connect/_category_.yml
rename to docs-cn/07-develop/01-connect/_category_.yml
diff --git a/docs-cn/04-develop/01-connect/_connect_c.mdx b/docs-cn/07-develop/01-connect/_connect_c.mdx
similarity index 100%
rename from docs-cn/04-develop/01-connect/_connect_c.mdx
rename to docs-cn/07-develop/01-connect/_connect_c.mdx
diff --git a/docs-cn/04-develop/01-connect/_connect_cs.mdx b/docs-cn/07-develop/01-connect/_connect_cs.mdx
similarity index 100%
rename from docs-cn/04-develop/01-connect/_connect_cs.mdx
rename to docs-cn/07-develop/01-connect/_connect_cs.mdx
diff --git a/docs-cn/04-develop/01-connect/_connect_go.mdx b/docs-cn/07-develop/01-connect/_connect_go.mdx
similarity index 100%
rename from docs-cn/04-develop/01-connect/_connect_go.mdx
rename to docs-cn/07-develop/01-connect/_connect_go.mdx
diff --git a/docs-cn/04-develop/01-connect/_connect_java.mdx b/docs-cn/07-develop/01-connect/_connect_java.mdx
similarity index 100%
rename from docs-cn/04-develop/01-connect/_connect_java.mdx
rename to docs-cn/07-develop/01-connect/_connect_java.mdx
diff --git a/docs-cn/04-develop/01-connect/_connect_node.mdx b/docs-cn/07-develop/01-connect/_connect_node.mdx
similarity index 100%
rename from docs-cn/04-develop/01-connect/_connect_node.mdx
rename to docs-cn/07-develop/01-connect/_connect_node.mdx
diff --git a/docs-cn/04-develop/01-connect/_connect_php.mdx b/docs-cn/07-develop/01-connect/_connect_php.mdx
similarity index 100%
rename from docs-cn/04-develop/01-connect/_connect_php.mdx
rename to docs-cn/07-develop/01-connect/_connect_php.mdx
diff --git a/docs-cn/07-develop/01-connect/_connect_python.mdx b/docs-cn/07-develop/01-connect/_connect_python.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..c0043c752e14bcc38f97c1046f2852a3f7fa2b7b
--- /dev/null
+++ b/docs-cn/07-develop/01-connect/_connect_python.mdx
@@ -0,0 +1,3 @@
+```python title="原生连接"
+{{#include docs-examples/python/connect_example.py}}
+```
diff --git a/docs-cn/04-develop/01-connect/_connect_r.mdx b/docs-cn/07-develop/01-connect/_connect_r.mdx
similarity index 100%
rename from docs-cn/04-develop/01-connect/_connect_r.mdx
rename to docs-cn/07-develop/01-connect/_connect_r.mdx
diff --git a/docs-cn/04-develop/01-connect/_connect_rust.mdx b/docs-cn/07-develop/01-connect/_connect_rust.mdx
similarity index 100%
rename from docs-cn/04-develop/01-connect/_connect_rust.mdx
rename to docs-cn/07-develop/01-connect/_connect_rust.mdx
diff --git a/docs-cn/04-develop/01-connect/index.md b/docs-cn/07-develop/01-connect/index.md
similarity index 99%
rename from docs-cn/04-develop/01-connect/index.md
rename to docs-cn/07-develop/01-connect/index.md
index ebdefc77b9cc23712626f7543e0e5cc29db3e080..3a15d03f93cee7dd064f29b4911019cae3632b9a 100644
--- a/docs-cn/04-develop/01-connect/index.md
+++ b/docs-cn/07-develop/01-connect/index.md
@@ -33,7 +33,7 @@ TDengine 提供了丰富的应用程序开发接口,为了便于用户快速
 关键不同点在于:
 
 1. 使用 REST 连接,用户无需安装客户端驱动程序 taosc,具有跨平台易用的优势,但性能要下降 30%左右。
-2. 使用原生连接可以体验 TDengine 的全部功能,如[参数绑定接口](/reference/connector/cpp#参数绑定-api)、[订阅](reference/connector/cpp#数据订阅接口)等等。
+2. 使用原生连接可以体验 TDengine 的全部功能,如[参数绑定接口](/reference/connector/cpp#参数绑定-api)、[订阅](/reference/connector/cpp#订阅和消费-api)等等。
 
 ## 安装客户端驱动 taosc
diff --git a/docs-cn/04-develop/02-model/_category_.yml b/docs-cn/07-develop/02-model/_category_.yml
similarity index 100%
rename from docs-cn/04-develop/02-model/_category_.yml
rename to docs-cn/07-develop/02-model/_category_.yml
diff --git a/docs-cn/04-develop/02-model/index.mdx b/docs-cn/07-develop/02-model/index.mdx
similarity index 92%
rename from docs-cn/04-develop/02-model/index.mdx
rename to docs-cn/07-develop/02-model/index.mdx
index a060e3c84b8c5b8e25714ce15fb2bc7afc7d49d2..7e2762b6e78393493c2c5b61959e9a6ff57a7b13 100644
--- a/docs-cn/04-develop/02-model/index.mdx
+++ b/docs-cn/07-develop/02-model/index.mdx
@@ -55,10 +55,10 @@ CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAG
 TDengine 对每个数据采集点需要独立建表。与标准的关系型数据库一样,一张表有表名,Schema,但除此之外,还可以带有一到多个标签。创建时,需要使用超级表做模板,同时指定标签的具体值。以[表 1](/tdinternal/arch#model_table1)中的智能电表为例,可以使用如下的 SQL 命令建表:
 
 ```sql
-CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);
+CREATE TABLE d1001 USING meters TAGS ("California.SanFrancisco", 2);
 ```
 
-其中 d1001 是表名,meters 是超级表的表名,后面紧跟标签 Location 的具体标签值 ”Beijing.Chaoyang",标签 groupId 的具体标签值 2。虽然在创建表时,需要指定标签值,但可以事后修改。详细细则请见 [TAOS SQL 的表管理](/taos-sql/table) 章节。
+其中 d1001 是表名,meters 是超级表的表名,后面紧跟标签 Location 的具体标签值 "California.SanFrancisco",标签 groupId 的具体标签值 2。虽然在创建表时,需要指定标签值,但可以事后修改。详细细则请见 [TAOS SQL 的表管理](/taos-sql/table) 章节。
 
 :::warning
 目前 TDengine 没有从技术层面限制使用一个 database (db1) 的超级表作为模板建立另一个 database (db2) 的子表,后续会禁止这种用法,不建议使用这种方法建表。
@@ -72,10 +72,10 @@ TDengine 建议将数据采集点的全局唯一 ID 作为表名(比如设备序
 在某些特殊场景中,用户在写数据时并不确定某个数据采集点的表是否存在,此时可在写入数据时使用自动建表语法来创建不存在的表,若该表已存在则不会建立新表且后面的 USING 语句被忽略。比如:
 
 ```sql
-INSERT INTO d1001 USING meters TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32);
+INSERT INTO d1001 USING meters TAGS ("California.SanFrancisco", 2) VALUES (now, 10.2, 219, 0.32);
 ```
 
-上述 SQL 语句将记录`(now, 10.2, 219, 0.32)`插入表 d1001。如果表 d1001 还未创建,则使用超级表 meters 做模板自动创建,同时打上标签值 `"Beijing.Chaoyang", 2`。
+上述 SQL 语句将记录`(now, 10.2, 219, 0.32)`插入表 d1001。如果表 d1001 还未创建,则使用超级表 meters 做模板自动创建,同时打上标签值 `"California.SanFrancisco", 2`。
 
 关于自动建表的详细语法请参见 [插入记录时自动建表](/taos-sql/insert#插入记录时自动建表) 章节。
diff --git a/docs-cn/04-develop/03-insert-data/01-sql-writing.mdx b/docs-cn/07-develop/03-insert-data/01-sql-writing.mdx
similarity index 98%
rename from docs-cn/04-develop/03-insert-data/01-sql-writing.mdx
rename to docs-cn/07-develop/03-insert-data/01-sql-writing.mdx
index e63ffce6dd07366da99fe1f41d0a2a8d7a623f31..99a92573c87d0f90f699a8d1352619f4df4aef39 100644
--- a/docs-cn/04-develop/03-insert-data/01-sql-writing.mdx
+++ b/docs-cn/07-develop/03-insert-data/01-sql-writing.mdx
@@ -52,7 +52,7 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6,
 
 :::info
 
-- 要提高写入效率,需要批量写入。一批写入的记录条数越多,插入效率就越高。但一条记录不能超过 16K,一条 SQL 语句总长度不能超过 1M 。
+- 要提高写入效率,需要批量写入。一批写入的记录条数越多,插入效率就越高。但一条记录不能超过 48K,一条 SQL 语句总长度不能超过 1M 。
 - TDengine 支持多线程同时写入,要进一步提高写入速度,一个客户端需要打开 20 个以上的线程同时写。但线程数达到一定数量后,无法再提高,甚至还会下降,因为线程频繁切换,带来额外开销。
 
 :::
diff --git a/docs-cn/04-develop/03-insert-data/02-influxdb-line.mdx b/docs-cn/07-develop/03-insert-data/02-influxdb-line.mdx
similarity index 95%
rename from docs-cn/04-develop/03-insert-data/02-influxdb-line.mdx
rename to docs-cn/07-develop/03-insert-data/02-influxdb-line.mdx
index dedd7f0e70834e21257bda78dd184f5ddc520160..54f02c91475bb5524e259a0aa890363603a86fba 100644
--- a/docs-cn/04-develop/03-insert-data/02-influxdb-line.mdx
+++ b/docs-cn/07-develop/03-insert-data/02-influxdb-line.mdx
@@ -29,7 +29,7 @@ measurement,tag_set field_set timestamp
 例如:
 
 ```
-meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500
+meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500
 ```
 
 :::note
@@ -42,7 +42,6 @@ meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 16
 
 要了解更多可参考:[InfluxDB Line 协议官方文档](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) 和 [TDengine 无模式写入参考指南](/reference/schemaless/#无模式写入行协议)
 
-
 ## 示例代码
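This page and the two OpenTSDB pages that follow all go through the same schemaless write path. A minimal C sketch of that path, under stated assumptions: it uses the 2.x client's `taos_schemaless_insert` entry point (available from roughly 2.4 on), assumes database `test` already exists, and the protocol/precision enum names should be checked against the installed `taos.h`:

```c
#include <stdio.h>
#include "taos.h"

int main() {
  TAOS *taos = taos_connect("localhost", "root", "taosdata", "test", 0);
  if (taos == NULL) return 1;
  /* One line in the exact format shown above; the trailing timestamp is microseconds */
  char *lines[] = {
      "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500"};
  TAOS_RES *res = taos_schemaless_insert(taos, lines, 1, TSDB_SML_LINE_PROTOCOL,
                                         TSDB_SML_TIMESTAMP_MICRO_SECONDS);
  if (taos_errno(res) != 0) {
    printf("schemaless insert failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
  taos_close(taos);
  return 0;
}
```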
diff --git a/docs-cn/04-develop/03-insert-data/03-opentsdb-telnet.mdx b/docs-cn/07-develop/03-insert-data/03-opentsdb-telnet.mdx
similarity index 86%
rename from docs-cn/04-develop/03-insert-data/03-opentsdb-telnet.mdx
rename to docs-cn/07-develop/03-insert-data/03-opentsdb-telnet.mdx
index dfbe6efda67b6928999287900637e0a251b86562..2b397e1bdc7a4c76686cd4b6d457a25dbcc2c950 100644
--- a/docs-cn/04-develop/03-insert-data/03-opentsdb-telnet.mdx
+++ b/docs-cn/07-develop/03-insert-data/03-opentsdb-telnet.mdx
@@ -29,10 +29,10 @@ OpenTSDB 行协议同样采用一行字符串来表示一行数据。OpenTSDB
 例如:
 
 ```txt
-meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3
+meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3
 ```
 
-参考[OpenTSDB Telnet API文档](http://opentsdb.net/docs/build/html/api_telnet/put.html)。
+参考[OpenTSDB Telnet API 文档](http://opentsdb.net/docs/build/html/api_telnet/put.html)。
 
 ## 示例代码
@@ -76,9 +76,9 @@ Query OK, 2 row(s) in set (0.002544s)
 taos> select tbname, * from `meters.current`;
              tbname | ts | value | groupid | location |
 ==================================================================================================================================
- t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.249 | 10.800000000 | 3 | Beijing.Haidian |
- t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.250 | 11.300000000 | 3 | Beijing.Haidian |
- t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.249 | 10.300000000 | 2 | Beijing.Chaoyang |
- t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.250 | 12.600000000 | 2 | Beijing.Chaoyang |
+ t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.249 | 10.800000000 | 3 | California.LosAngeles |
+ t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.250 | 11.300000000 | 3 | California.LosAngeles |
+ t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.249 | 10.300000000 | 2 | California.SanFrancisco |
+ t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.250 | 12.600000000 | 2 | California.SanFrancisco |
 Query OK, 4 row(s) in set (0.005399s)
 ```
diff --git a/docs-cn/04-develop/03-insert-data/04-opentsdb-json.mdx b/docs-cn/07-develop/03-insert-data/04-opentsdb-json.mdx
similarity index 82%
rename from docs-cn/04-develop/03-insert-data/04-opentsdb-json.mdx
rename to docs-cn/07-develop/03-insert-data/04-opentsdb-json.mdx
index 5d445997d061ca052e4f3673b8e881ea4acf0ade..a15f80a5851ad29605e871f16aed60b68109038a 100644
--- a/docs-cn/04-develop/03-insert-data/04-opentsdb-json.mdx
+++ b/docs-cn/07-develop/03-insert-data/04-opentsdb-json.mdx
@@ -19,33 +19,33 @@ OpenTSDB JSON 格式协议采用一个 JSON 字符串表示一行或多行数据
 
 ```json
 [
-    {
-        "metric": "sys.cpu.nice",
-        "timestamp": 1346846400,
-        "value": 18,
-        "tags": {
-           "host": "web01",
-           "dc": "lga"
-        }
-    },
-    {
-        "metric": "sys.cpu.nice",
-        "timestamp": 1346846400,
-        "value": 9,
-        "tags": {
-           "host": "web02",
-           "dc": "lga"
-        }
+  {
+    "metric": "sys.cpu.nice",
+    "timestamp": 1346846400,
+    "value": 18,
+    "tags": {
+      "host": "web01",
+      "dc": "lga"
     }
+  },
+  {
+    "metric": "sys.cpu.nice",
+    "timestamp": 1346846400,
+    "value": 9,
+    "tags": {
+      "host": "web02",
+      "dc": "lga"
+    }
+  }
 ]
 ```
 
 与 OpenTSDB 行协议类似, metric 将作为超级表名, timestamp 表示时间戳,value 表示度量值, tags 表示标签集。
 
-
-参考[OpenTSDB HTTP API文档](http://opentsdb.net/docs/build/html/api_http/put.html)。
+参考[OpenTSDB HTTP API 文档](http://opentsdb.net/docs/build/html/api_http/put.html)。
 
 :::note
+
 - 对于 JSON 格式协议,TDengine 并不会自动把所有标签转成 nchar 类型, 字符串将转为 nchar 类型, 数值将同样转换为 double 类型。
 - TDengine 只接收 JSON **数组格式**的字符串,即使一行数据也需要转换成数组形式。
@@ -93,7 +93,7 @@ Query OK, 2 row(s) in set (0.001954s)
 taos> select * from `meters.current`;
              ts | value | groupid | location |
 ===================================================================================================================
- 2022-03-28 09:56:51.249 | 10.300000000 | 2.000000000 | Beijing.Chaoyang |
- 2022-03-28 09:56:51.250 | 12.600000000 | 2.000000000 | Beijing.Chaoyang |
+ 2022-03-28 09:56:51.249 | 10.300000000 | 2.000000000 | California.SanFrancisco |
+ 2022-03-28 09:56:51.250 | 12.600000000 | 2.000000000 | California.SanFrancisco |
 Query OK, 2 row(s) in set (0.004076s)
 ```
diff --git a/docs-cn/04-develop/03-insert-data/_c_line.mdx b/docs-cn/07-develop/03-insert-data/_c_line.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_c_line.mdx
rename to docs-cn/07-develop/03-insert-data/_c_line.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_c_opts_json.mdx b/docs-cn/07-develop/03-insert-data/_c_opts_json.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_c_opts_json.mdx
rename to docs-cn/07-develop/03-insert-data/_c_opts_json.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_c_opts_telnet.mdx b/docs-cn/07-develop/03-insert-data/_c_opts_telnet.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_c_opts_telnet.mdx
rename to docs-cn/07-develop/03-insert-data/_c_opts_telnet.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_c_sql.mdx b/docs-cn/07-develop/03-insert-data/_c_sql.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_c_sql.mdx
rename to docs-cn/07-develop/03-insert-data/_c_sql.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_c_stmt.mdx b/docs-cn/07-develop/03-insert-data/_c_stmt.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_c_stmt.mdx
rename to docs-cn/07-develop/03-insert-data/_c_stmt.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_category_.yml b/docs-cn/07-develop/03-insert-data/_category_.yml
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_category_.yml
rename to docs-cn/07-develop/03-insert-data/_category_.yml
diff --git a/docs-cn/04-develop/03-insert-data/_cs_line.mdx b/docs-cn/07-develop/03-insert-data/_cs_line.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_cs_line.mdx
rename to docs-cn/07-develop/03-insert-data/_cs_line.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_cs_opts_json.mdx b/docs-cn/07-develop/03-insert-data/_cs_opts_json.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_cs_opts_json.mdx
rename to docs-cn/07-develop/03-insert-data/_cs_opts_json.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_cs_opts_telnet.mdx b/docs-cn/07-develop/03-insert-data/_cs_opts_telnet.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_cs_opts_telnet.mdx
rename to docs-cn/07-develop/03-insert-data/_cs_opts_telnet.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_cs_sql.mdx b/docs-cn/07-develop/03-insert-data/_cs_sql.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_cs_sql.mdx
rename to docs-cn/07-develop/03-insert-data/_cs_sql.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_cs_stmt.mdx b/docs-cn/07-develop/03-insert-data/_cs_stmt.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_cs_stmt.mdx
rename to docs-cn/07-develop/03-insert-data/_cs_stmt.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_go_line.mdx b/docs-cn/07-develop/03-insert-data/_go_line.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_go_line.mdx
rename to docs-cn/07-develop/03-insert-data/_go_line.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_go_opts_json.mdx b/docs-cn/07-develop/03-insert-data/_go_opts_json.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_go_opts_json.mdx
rename to docs-cn/07-develop/03-insert-data/_go_opts_json.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_go_opts_telnet.mdx b/docs-cn/07-develop/03-insert-data/_go_opts_telnet.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_go_opts_telnet.mdx
rename to docs-cn/07-develop/03-insert-data/_go_opts_telnet.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_go_sql.mdx b/docs-cn/07-develop/03-insert-data/_go_sql.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_go_sql.mdx
rename to docs-cn/07-develop/03-insert-data/_go_sql.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_go_stmt.mdx b/docs-cn/07-develop/03-insert-data/_go_stmt.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_go_stmt.mdx
rename to docs-cn/07-develop/03-insert-data/_go_stmt.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_java_line.mdx b/docs-cn/07-develop/03-insert-data/_java_line.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_java_line.mdx
rename to docs-cn/07-develop/03-insert-data/_java_line.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_java_opts_json.mdx b/docs-cn/07-develop/03-insert-data/_java_opts_json.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_java_opts_json.mdx
rename to docs-cn/07-develop/03-insert-data/_java_opts_json.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_java_opts_telnet.mdx b/docs-cn/07-develop/03-insert-data/_java_opts_telnet.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_java_opts_telnet.mdx
rename to docs-cn/07-develop/03-insert-data/_java_opts_telnet.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_java_sql.mdx b/docs-cn/07-develop/03-insert-data/_java_sql.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_java_sql.mdx
rename to docs-cn/07-develop/03-insert-data/_java_sql.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_java_stmt.mdx b/docs-cn/07-develop/03-insert-data/_java_stmt.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_java_stmt.mdx
rename to docs-cn/07-develop/03-insert-data/_java_stmt.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_js_line.mdx b/docs-cn/07-develop/03-insert-data/_js_line.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_js_line.mdx
rename to docs-cn/07-develop/03-insert-data/_js_line.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_js_opts_json.mdx b/docs-cn/07-develop/03-insert-data/_js_opts_json.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_js_opts_json.mdx
rename to docs-cn/07-develop/03-insert-data/_js_opts_json.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_js_opts_telnet.mdx b/docs-cn/07-develop/03-insert-data/_js_opts_telnet.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_js_opts_telnet.mdx
rename to docs-cn/07-develop/03-insert-data/_js_opts_telnet.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_js_sql.mdx b/docs-cn/07-develop/03-insert-data/_js_sql.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_js_sql.mdx
rename to docs-cn/07-develop/03-insert-data/_js_sql.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_js_stmt.mdx b/docs-cn/07-develop/03-insert-data/_js_stmt.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_js_stmt.mdx
rename to docs-cn/07-develop/03-insert-data/_js_stmt.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_php_sql.mdx b/docs-cn/07-develop/03-insert-data/_php_sql.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_php_sql.mdx
rename to docs-cn/07-develop/03-insert-data/_php_sql.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_php_stmt.mdx b/docs-cn/07-develop/03-insert-data/_php_stmt.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_php_stmt.mdx
rename to docs-cn/07-develop/03-insert-data/_php_stmt.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_py_line.mdx b/docs-cn/07-develop/03-insert-data/_py_line.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_py_line.mdx
rename to docs-cn/07-develop/03-insert-data/_py_line.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_py_opts_json.mdx b/docs-cn/07-develop/03-insert-data/_py_opts_json.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_py_opts_json.mdx
rename to docs-cn/07-develop/03-insert-data/_py_opts_json.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_py_opts_telnet.mdx b/docs-cn/07-develop/03-insert-data/_py_opts_telnet.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_py_opts_telnet.mdx
rename to docs-cn/07-develop/03-insert-data/_py_opts_telnet.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_py_sql.mdx b/docs-cn/07-develop/03-insert-data/_py_sql.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_py_sql.mdx
rename to docs-cn/07-develop/03-insert-data/_py_sql.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_py_stmt.mdx b/docs-cn/07-develop/03-insert-data/_py_stmt.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_py_stmt.mdx
rename to docs-cn/07-develop/03-insert-data/_py_stmt.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_rust_line.mdx b/docs-cn/07-develop/03-insert-data/_rust_line.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_rust_line.mdx
rename to docs-cn/07-develop/03-insert-data/_rust_line.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_rust_opts_json.mdx b/docs-cn/07-develop/03-insert-data/_rust_opts_json.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_rust_opts_json.mdx
rename to docs-cn/07-develop/03-insert-data/_rust_opts_json.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_rust_opts_telnet.mdx b/docs-cn/07-develop/03-insert-data/_rust_opts_telnet.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_rust_opts_telnet.mdx
rename to docs-cn/07-develop/03-insert-data/_rust_opts_telnet.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_rust_sql.mdx b/docs-cn/07-develop/03-insert-data/_rust_sql.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_rust_sql.mdx
rename to docs-cn/07-develop/03-insert-data/_rust_sql.mdx
diff --git a/docs-cn/04-develop/03-insert-data/_rust_stmt.mdx b/docs-cn/07-develop/03-insert-data/_rust_stmt.mdx
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/_rust_stmt.mdx
rename to docs-cn/07-develop/03-insert-data/_rust_stmt.mdx
diff --git a/docs-cn/04-develop/03-insert-data/index.md b/docs-cn/07-develop/03-insert-data/index.md
similarity index 100%
rename from docs-cn/04-develop/03-insert-data/index.md
rename to docs-cn/07-develop/03-insert-data/index.md
diff --git a/docs-cn/04-develop/04-query-data/_c.mdx b/docs-cn/07-develop/04-query-data/_c.mdx
similarity index 100%
rename from docs-cn/04-develop/04-query-data/_c.mdx
rename to docs-cn/07-develop/04-query-data/_c.mdx
diff --git a/docs-cn/04-develop/04-query-data/_c_async.mdx b/docs-cn/07-develop/04-query-data/_c_async.mdx
similarity index 100%
rename from docs-cn/04-develop/04-query-data/_c_async.mdx
rename to docs-cn/07-develop/04-query-data/_c_async.mdx
diff --git a/docs-cn/04-develop/04-query-data/_category_.yml b/docs-cn/07-develop/04-query-data/_category_.yml
similarity index 100%
rename from docs-cn/04-develop/04-query-data/_category_.yml
rename to docs-cn/07-develop/04-query-data/_category_.yml
diff --git a/docs-cn/04-develop/04-query-data/_cs.mdx b/docs-cn/07-develop/04-query-data/_cs.mdx
similarity index 100%
rename from docs-cn/04-develop/04-query-data/_cs.mdx
rename to docs-cn/07-develop/04-query-data/_cs.mdx
diff --git a/docs-cn/04-develop/04-query-data/_cs_async.mdx b/docs-cn/07-develop/04-query-data/_cs_async.mdx
similarity index 100%
rename from docs-cn/04-develop/04-query-data/_cs_async.mdx
rename to docs-cn/07-develop/04-query-data/_cs_async.mdx
diff --git a/docs-cn/04-develop/04-query-data/_go.mdx b/docs-cn/07-develop/04-query-data/_go.mdx
similarity index 100%
rename from docs-cn/04-develop/04-query-data/_go.mdx
rename to docs-cn/07-develop/04-query-data/_go.mdx
diff --git a/docs-cn/04-develop/04-query-data/_go_async.mdx b/docs-cn/07-develop/04-query-data/_go_async.mdx
similarity index 100%
rename from docs-cn/04-develop/04-query-data/_go_async.mdx
rename to docs-cn/07-develop/04-query-data/_go_async.mdx
diff --git a/docs-cn/04-develop/04-query-data/_java.mdx b/docs-cn/07-develop/04-query-data/_java.mdx
similarity index 100%
rename from docs-cn/04-develop/04-query-data/_java.mdx
rename to docs-cn/07-develop/04-query-data/_java.mdx
diff --git a/docs-cn/04-develop/04-query-data/_js.mdx b/docs-cn/07-develop/04-query-data/_js.mdx
similarity index 100%
rename from docs-cn/04-develop/04-query-data/_js.mdx
rename to docs-cn/07-develop/04-query-data/_js.mdx
diff --git a/docs-cn/04-develop/04-query-data/_js_async.mdx b/docs-cn/07-develop/04-query-data/_js_async.mdx
similarity index 100%
rename from docs-cn/04-develop/04-query-data/_js_async.mdx
rename to docs-cn/07-develop/04-query-data/_js_async.mdx
diff --git a/docs-cn/04-develop/04-query-data/_php.mdx b/docs-cn/07-develop/04-query-data/_php.mdx
similarity index 100%
rename from docs-cn/04-develop/04-query-data/_php.mdx
rename to docs-cn/07-develop/04-query-data/_php.mdx
diff --git a/docs-cn/04-develop/04-query-data/_py.mdx b/docs-cn/07-develop/04-query-data/_py.mdx
similarity index 100%
rename from docs-cn/04-develop/04-query-data/_py.mdx
rename to docs-cn/07-develop/04-query-data/_py.mdx
diff --git a/docs-cn/04-develop/04-query-data/_py_async.mdx b/docs-cn/07-develop/04-query-data/_py_async.mdx
similarity index 100%
rename from docs-cn/04-develop/04-query-data/_py_async.mdx
rename to docs-cn/07-develop/04-query-data/_py_async.mdx
diff --git a/docs-cn/04-develop/04-query-data/_rust.mdx b/docs-cn/07-develop/04-query-data/_rust.mdx
similarity index 100%
rename from docs-cn/04-develop/04-query-data/_rust.mdx
rename to docs-cn/07-develop/04-query-data/_rust.mdx
diff --git a/docs-cn/04-develop/04-query-data/index.mdx b/docs-cn/07-develop/04-query-data/index.mdx
similarity index 94%
rename from docs-cn/04-develop/04-query-data/index.mdx
rename to docs-cn/07-develop/04-query-data/index.mdx
index b0a6bad3eaad174a97d8dce4e1ba0125cbf5dc03..824f36ef2f98aac227bdcaf2016d7be0a2e59328 100644
--- a/docs-cn/04-develop/04-query-data/index.mdx
+++ b/docs-cn/07-develop/04-query-data/index.mdx
@@ -50,14 +50,14 @@ Query OK, 2 row(s) in set (0.001100s)
 
 ### 示例一
 
-在 TAOS Shell,查找北京所有智能电表采集的电压平均值,并按照 location 分组。
+在 TAOS Shell,查找加利福尼亚州所有智能电表采集的电压平均值,并按照 location 分组。
 
 ```
 taos> SELECT AVG(voltage) FROM meters GROUP BY location;
       avg(voltage) | location |
 =============================================================
- 222.000000000 | Beijing.Haidian |
- 219.200000000 | Beijing.Chaoyang |
+ 222.000000000 | California.LosAngeles |
+ 219.200000000 | California.SanFrancisco |
 Query OK, 2 row(s) in set (0.002136s)
 ```
@@ -88,10 +88,10 @@ taos> SELECT sum(current) FROM d1001 INTERVAL(10s);
 Query OK, 2 row(s) in set (0.000883s)
 ```
 
-降采样操作也适用于超级表,比如:将北京所有智能电表采集的电流值每秒钟求和
+降采样操作也适用于超级表,比如:将加利福尼亚州所有智能电表采集的电流值每秒钟求和
 
 ```
-taos> SELECT SUM(current) FROM meters where location like "Beijing%" INTERVAL(1s);
+taos> SELECT SUM(current) FROM meters where location like "California%" INTERVAL(1s);
           ts | sum(current) |
 ======================================================
 2018-10-03 14:38:04.000 | 10.199999809 |
diff --git a/docs-cn/04-develop/05-continuous-query.mdx b/docs-cn/07-develop/05-continuous-query.mdx
similarity index 97%
rename from docs-cn/04-develop/05-continuous-query.mdx
rename to docs-cn/07-develop/05-continuous-query.mdx
index 2fd1b3cc755188f513fe511541a84efa3558d3ea..b2223d15e33114d263b9833df51e4201bc01c772 100644
--- a/docs-cn/04-develop/05-continuous-query.mdx
+++ b/docs-cn/07-develop/05-continuous-query.mdx
@@ -34,8 +34,8 @@ SLIDING: 连续查询的时间窗口向前滑动的时间间隔
 
 ```sql
 create table meters (ts timestamp, current float, voltage int, phase float) tags (location binary(64), groupId int);
-create table D1001 using meters tags ("Beijing.Chaoyang", 2);
-create table D1002 using meters tags ("Beijing.Haidian", 2);
+create table D1001 using meters tags ("California.SanFrancisco", 2);
("California.LosAngeles", 2); ... ``` diff --git a/docs-cn/04-develop/06-subscribe.mdx b/docs-cn/07-develop/06-subscribe.mdx similarity index 91% rename from docs-cn/04-develop/06-subscribe.mdx rename to docs-cn/07-develop/06-subscribe.mdx index d471c114e827d7c4b40195c2c1b3c8f6a9d26ed4..0f531e07c9dce7dbb03bacebf8e5cbefae82671f 100644 --- a/docs-cn/04-develop/06-subscribe.mdx +++ b/docs-cn/07-develop/06-subscribe.mdx @@ -145,7 +145,7 @@ void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) { taos_unsubscribe(tsub, keep); ``` -其第二个参数,用于决定是否在客户端保留订阅的进度信息。如果这个参数是**false**(**0**),那无论下次调用 `taos_subscribe` 时的 `restart` 参数是什么,订阅都只能重新开始。另外,进度信息的保存位置是 _{DataDir}/subscribe/_ 这个目录下,每个订阅有一个与其 `topic` 同名的文件,删掉某个文件,同样会导致下次创建其对应的订阅时只能重新开始。 +其第二个参数,用于决定是否在客户端保留订阅的进度信息。如果这个参数是**false**(**0**),那无论下次调用 `taos_subscribe` 时的 `restart` 参数是什么,订阅都只能重新开始。另外,进度信息的保存位置是 _{DataDir}/subscribe/_ 这个目录下(注:`taos.cfg` 配置文件中 `DataDir` 参数值默认为 **/var/lib/taos/**,但是 Windows 服务器上本身不存在该目录,所以需要在 Windows 的配置文件中修改 `DataDir` 参数值为相应的已存在目录"),每个订阅有一个与其 `topic` 同名的文件,删掉某个文件,同样会导致下次创建其对应的订阅时只能重新开始。 代码介绍完毕,我们来看一下实际的运行效果。假设: @@ -184,8 +184,8 @@ taos> use power; # create super table "meters" taos> create table meters(ts timestamp, current float, voltage int, phase int) tags(location binary(64), groupId int); # create tabes using the schema defined by super table "meters" -taos> create table d1001 using meters tags ("Beijing.Chaoyang", 2); -taos> create table d1002 using meters tags ("Beijing.Haidian", 2); +taos> create table d1001 using meters tags ("California.SanFrancisco", 2); +taos> create table d1002 using meters tags ("California.LosAngeles", 2); # insert some rows taos> insert into d1001 values("2020-08-15 12:00:00.000", 12, 220, 1),("2020-08-15 12:10:00.000", 12.3, 220, 2),("2020-08-15 12:20:00.000", 12.2, 220, 1); taos> insert into d1002 values("2020-08-15 12:00:00.000", 9.9, 220, 1),("2020-08-15 12:10:00.000", 10.3, 220, 1),("2020-08-15 12:20:00.000", 11.2, 220, 1); @@ -193,27 +193,28 @@ taos> insert into d1002 values("2020-08-15 12:00:00.000", 9.9, 220, 1),("2020-08 taos> select * from meters where current > 10; ts | current | voltage | phase | location | groupid | =========================================================================================================== - 2020-08-15 12:10:00.000 | 10.30000 | 220 | 1 | Beijing.Haidian | 2 | - 2020-08-15 12:20:00.000 | 11.20000 | 220 | 1 | Beijing.Haidian | 2 | - 2020-08-15 12:00:00.000 | 12.00000 | 220 | 1 | Beijing.Chaoyang | 2 | - 2020-08-15 12:10:00.000 | 12.30000 | 220 | 2 | Beijing.Chaoyang | 2 | - 2020-08-15 12:20:00.000 | 12.20000 | 220 | 1 | Beijing.Chaoyang | 2 | + 2020-08-15 12:10:00.000 | 10.30000 | 220 | 1 | California.LosAngeles | 2 | + 2020-08-15 12:20:00.000 | 11.20000 | 220 | 1 | California.LosAngeles | 2 | + 2020-08-15 12:00:00.000 | 12.00000 | 220 | 1 | California.SanFrancisco | 2 | + 2020-08-15 12:10:00.000 | 12.30000 | 220 | 2 | California.SanFrancisco | 2 | + 2020-08-15 12:20:00.000 | 12.20000 | 220 | 1 | California.SanFrancisco | 2 | Query OK, 5 row(s) in set (0.004896s) ``` + ### 示例代码 - + - + {/* */} - + {/* @@ -222,20 +223,20 @@ Query OK, 5 row(s) in set (0.004896s) */} - - + + ### 运行示例程序 - + 示例程序会先消费符合查询条件的所有历史数据: ```bash -ts: 1597464000000 current: 12.0 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid : 2 -ts: 1597464600000 current: 12.3 voltage: 220 phase: 2 location: Beijing.Chaoyang groupid : 2 -ts: 1597465200000 current: 12.2 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid : 2 -ts: 1597464600000 current: 10.3 
voltage: 220 phase: 1 location: Beijing.Haidian groupid : 2 -ts: 1597465200000 current: 11.2 voltage: 220 phase: 1 location: Beijing.Haidian groupid : 2 +ts: 1597464000000 current: 12.0 voltage: 220 phase: 1 location: California.SanFrancisco groupid : 2 +ts: 1597464600000 current: 12.3 voltage: 220 phase: 2 location: California.SanFrancisco groupid : 2 +ts: 1597465200000 current: 12.2 voltage: 220 phase: 1 location: California.SanFrancisco groupid : 2 +ts: 1597464600000 current: 10.3 voltage: 220 phase: 1 location: California.LosAngeles groupid : 2 +ts: 1597465200000 current: 11.2 voltage: 220 phase: 1 location: California.LosAngeles groupid : 2 ``` 接着,使用 TDengine CLI 向表中新增一条数据: @@ -249,5 +250,5 @@ taos> insert into d1001 values(now, 12.4, 220, 1); 因为这条数据的电流大于 10A,示例程序会将其消费: ``` -ts: 1651146662805 current: 12.4 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid: 2 +ts: 1651146662805 current: 12.4 voltage: 220 phase: 1 location: California.SanFrancisco groupid: 2 ``` diff --git a/docs-cn/04-develop/07-cache.md b/docs-cn/07-develop/07-cache.md similarity index 93% rename from docs-cn/04-develop/07-cache.md rename to docs-cn/07-develop/07-cache.md index fd31335310d62d792e5173e38a9aa778ee6c6c60..cc59c0353c0d12fb7a8f0f20254087d741361031 100644 --- a/docs-cn/04-develop/07-cache.md +++ b/docs-cn/07-develop/07-cache.md @@ -1,6 +1,6 @@ --- sidebar_label: 缓存 -title: 缓存 +title: 缓存 description: "提供写驱动的缓存管理机制,将每个表最近写入的一条记录持续保存在缓存中,可以提供高性能的最近状态查询。" --- @@ -15,7 +15,7 @@ TDengine 将内存池按块划分进行管理,数据在内存块里是以行 你可以通过函数 last_row() 快速获取一张表或一张超级表的最后一条记录,这样很便于在大屏显示各设备的实时状态或采集值。例如: ```sql -select last_row(voltage) from meters where location='Beijing.Chaoyang'; +select last_row(voltage) from meters where location='California.SanFrancisco'; ``` -该 SQL 语句将获取所有位于北京朝阳区的电表最后记录的电压值。 +该 SQL 语句将获取所有位于加利福尼亚州旧金山市的电表最后记录的电压值。 diff --git a/docs-cn/04-develop/08-udf.md b/docs-cn/07-develop/08-udf.md similarity index 100% rename from docs-cn/04-develop/08-udf.md rename to docs-cn/07-develop/08-udf.md diff --git a/docs-cn/04-develop/_category_.yml b/docs-cn/07-develop/_category_.yml similarity index 100% rename from docs-cn/04-develop/_category_.yml rename to docs-cn/07-develop/_category_.yml diff --git a/docs-cn/04-develop/_sub_c.mdx b/docs-cn/07-develop/_sub_c.mdx similarity index 100% rename from docs-cn/04-develop/_sub_c.mdx rename to docs-cn/07-develop/_sub_c.mdx diff --git a/docs-cn/04-develop/_sub_cs.mdx b/docs-cn/07-develop/_sub_cs.mdx similarity index 100% rename from docs-cn/04-develop/_sub_cs.mdx rename to docs-cn/07-develop/_sub_cs.mdx diff --git a/docs-cn/04-develop/_sub_go.mdx b/docs-cn/07-develop/_sub_go.mdx similarity index 100% rename from docs-cn/04-develop/_sub_go.mdx rename to docs-cn/07-develop/_sub_go.mdx diff --git a/docs-cn/04-develop/_sub_java.mdx b/docs-cn/07-develop/_sub_java.mdx similarity index 100% rename from docs-cn/04-develop/_sub_java.mdx rename to docs-cn/07-develop/_sub_java.mdx diff --git a/docs-cn/04-develop/_sub_node.mdx b/docs-cn/07-develop/_sub_node.mdx similarity index 100% rename from docs-cn/04-develop/_sub_node.mdx rename to docs-cn/07-develop/_sub_node.mdx diff --git a/docs-cn/04-develop/_sub_python.mdx b/docs-cn/07-develop/_sub_python.mdx similarity index 100% rename from docs-cn/04-develop/_sub_python.mdx rename to docs-cn/07-develop/_sub_python.mdx diff --git a/docs-cn/04-develop/_sub_rust.mdx b/docs-cn/07-develop/_sub_rust.mdx similarity index 100% rename from docs-cn/04-develop/_sub_rust.mdx rename to docs-cn/07-develop/_sub_rust.mdx diff --git a/docs-cn/04-develop/index.md 
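For reference, a compressed synchronous-mode sketch of the subscription workflow this page documents, using only the calls the text names (`taos_subscribe`, `taos_consume`, `taos_unsubscribe`); the connection parameters and topic name are placeholders, and error handling is elided:

```c
#include <stdio.h>
#include <unistd.h>
#include "taos.h"

int main() {
  TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
  if (taos == NULL) return 1;
  /* restart=1 starts from scratch; a NULL callback selects synchronous mode,
     and interval=1000 ms is the minimum gap between two polls */
  TAOS_SUB *tsub = taos_subscribe(taos, 1, "topic-demo",
                                  "select * from power.meters where current > 10;",
                                  NULL, NULL, 1000);
  for (int i = 0; i < 5; i++) {
    TAOS_RES *res = taos_consume(tsub); /* returns only rows added since the last poll */
    TAOS_ROW row;
    while ((row = taos_fetch_row(res)) != NULL) {
      char buf[1024] = {0};
      taos_print_row(buf, row, taos_fetch_fields(res), taos_num_fields(res));
      printf("%s\n", buf);
    }
    sleep(1);
  }
  taos_unsubscribe(tsub, 1); /* keep=1 persists progress under {DataDir}/subscribe/ */
  taos_close(taos);
  return 0;
}
```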
diff --git a/docs-cn/04-develop/07-cache.md b/docs-cn/07-develop/07-cache.md
similarity index 93%
rename from docs-cn/04-develop/07-cache.md
rename to docs-cn/07-develop/07-cache.md
index fd31335310d62d792e5173e38a9aa778ee6c6c60..cc59c0353c0d12fb7a8f0f20254087d741361031 100644
--- a/docs-cn/04-develop/07-cache.md
+++ b/docs-cn/07-develop/07-cache.md
@@ -1,6 +1,6 @@
 ---
 sidebar_label: 缓存
-title: 缓存
+title: 缓存
 description: "提供写驱动的缓存管理机制,将每个表最近写入的一条记录持续保存在缓存中,可以提供高性能的最近状态查询。"
 ---
@@ -15,7 +15,7 @@ TDengine 将内存池按块划分进行管理,数据在内存块里是以行
 你可以通过函数 last_row() 快速获取一张表或一张超级表的最后一条记录,这样很便于在大屏显示各设备的实时状态或采集值。例如:
 
 ```sql
-select last_row(voltage) from meters where location='Beijing.Chaoyang';
+select last_row(voltage) from meters where location='California.SanFrancisco';
 ```
 
-该 SQL 语句将获取所有位于北京朝阳区的电表最后记录的电压值。
+该 SQL 语句将获取所有位于加利福尼亚州旧金山市的电表最后记录的电压值。
diff --git a/docs-cn/04-develop/08-udf.md b/docs-cn/07-develop/08-udf.md
similarity index 100%
rename from docs-cn/04-develop/08-udf.md
rename to docs-cn/07-develop/08-udf.md
diff --git a/docs-cn/04-develop/_category_.yml b/docs-cn/07-develop/_category_.yml
similarity index 100%
rename from docs-cn/04-develop/_category_.yml
rename to docs-cn/07-develop/_category_.yml
diff --git a/docs-cn/04-develop/_sub_c.mdx b/docs-cn/07-develop/_sub_c.mdx
similarity index 100%
rename from docs-cn/04-develop/_sub_c.mdx
rename to docs-cn/07-develop/_sub_c.mdx
diff --git a/docs-cn/04-develop/_sub_cs.mdx b/docs-cn/07-develop/_sub_cs.mdx
similarity index 100%
rename from docs-cn/04-develop/_sub_cs.mdx
rename to docs-cn/07-develop/_sub_cs.mdx
diff --git a/docs-cn/04-develop/_sub_go.mdx b/docs-cn/07-develop/_sub_go.mdx
similarity index 100%
rename from docs-cn/04-develop/_sub_go.mdx
rename to docs-cn/07-develop/_sub_go.mdx
diff --git a/docs-cn/04-develop/_sub_java.mdx b/docs-cn/07-develop/_sub_java.mdx
similarity index 100%
rename from docs-cn/04-develop/_sub_java.mdx
rename to docs-cn/07-develop/_sub_java.mdx
diff --git a/docs-cn/04-develop/_sub_node.mdx b/docs-cn/07-develop/_sub_node.mdx
similarity index 100%
rename from docs-cn/04-develop/_sub_node.mdx
rename to docs-cn/07-develop/_sub_node.mdx
diff --git a/docs-cn/04-develop/_sub_python.mdx b/docs-cn/07-develop/_sub_python.mdx
similarity index 100%
rename from docs-cn/04-develop/_sub_python.mdx
rename to docs-cn/07-develop/_sub_python.mdx
diff --git a/docs-cn/04-develop/_sub_rust.mdx b/docs-cn/07-develop/_sub_rust.mdx
similarity index 100%
rename from docs-cn/04-develop/_sub_rust.mdx
rename to docs-cn/07-develop/_sub_rust.mdx
diff --git a/docs-cn/04-develop/index.md b/docs-cn/07-develop/index.md
similarity index 100%
rename from docs-cn/04-develop/index.md
rename to docs-cn/07-develop/index.md
diff --git a/docs-cn/10-cluster/01-deploy.md b/docs-cn/10-cluster/01-deploy.md
index cee140c0ec13bc9c8052a599a2147acc1aa15a8d..b44d2942f2e4672ef6060aa9d084db1d3342e1c8 100644
--- a/docs-cn/10-cluster/01-deploy.md
+++ b/docs-cn/10-cluster/01-deploy.md
@@ -22,7 +22,7 @@ title: 集群部署
 
 ### 第二步
 
-建议关闭所有物理节点的防火墙,至少保证端口:6030 - 6042 的 TCP 和 UDP 端口都是开放的。强烈建议先关闭防火墙,集群搭建完毕之后,再来配置端口;
+确保集群中所有主机在端口 6030-6042 上的 TCP/UDP 协议能够互通。
 
 ### 第三步
diff --git a/docs-cn/10-cluster/index.md b/docs-cn/10-cluster/index.md
index 2e751cea4c219af0dbcdf7935cf8fc6e9fc611d5..ef2a7253c977cbdbd101ba6af5d7e1584aaf34bd 100644
--- a/docs-cn/10-cluster/index.md
+++ b/docs-cn/10-cluster/index.md
@@ -2,9 +2,9 @@
 title: 集群管理
 ---
 
-TDengine 支持集群,提供水平扩展的能力。如果需要获得更高的处理能力,只需要多增加节点即可。TDengine 采用虚拟节点技术,将一个节点虚拟化为多个虚拟节点,以实现负载均衡。同时,TDengine可以将多个节点上的虚拟节点组成虚拟节点组,通过多副本机制,以保证供系统的高可用。
+TDengine 支持集群,提供水平扩展的能力。如果需要获得更高的处理能力,只需要多增加节点即可。TDengine 采用虚拟节点技术,将一个节点虚拟化为多个虚拟节点,以实现负载均衡。同时,TDengine可以将多个节点上的虚拟节点组成虚拟节点组,通过多副本机制,以保证系统的高可用。TDengine的集群功能完全开源。
 
-TDengine的集群功能完全开源。
+本章节主要介绍集群的部署、维护,以及如何实现高可用和负载均衡。
 
 ```mdx-code-block
 import DocCardList from '@theme/DocCardList';
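One quick way to spot-check the reachability requirement in 第二步 before starting `taosd`; the hostname is a placeholder and `nc` (netcat) must be installed:

```bash
# TCP probe of the full port range against one peer; repeat for every host in the cluster
for p in $(seq 6030 6042); do
  nc -z -w 3 h1.tdengine.example "$p" && echo "tcp/$p reachable"
done
```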
请注意上面列出的所有参数都可以配置在配置文件 `taosd.cfg` 中作为创建数据库时使用的默认配置, `create database` 的参数中明确指定的会覆盖配置文件中的设置。 ::: +### 创建数据库示例 + +创建时间精度为纳秒的数据库, 保留 1 年数据: + +```sql +CREATE DATABASE test PRECISION 'ns' KEEP 365; +``` + ## 显示系统当前参数 ``` @@ -102,7 +110,7 @@ CACHELAST 参数控制是否在内存中缓存子表的最近数据。缺省值 :::tip 以上所有参数修改后都可以用 show databases 来确认是否修改成功。另外,从 2.1.3.0 版本开始,修改这些参数后无需重启服务器即可生效。 -:::tip +::: ## 显示系统所有数据库 diff --git a/docs-cn/12-taos-sql/03-table.md b/docs-cn/12-taos-sql/03-table.md index 675c157b3def0d670f771f55b767f3ca4f2a28af..d7235f312933ec46ed427d5da7e2c5a229fa2926 100644 --- a/docs-cn/12-taos-sql/03-table.md +++ b/docs-cn/12-taos-sql/03-table.md @@ -12,7 +12,7 @@ CREATE TABLE [IF NOT EXISTS] tb_name (timestamp_field_name TIMESTAMP, field1_nam 1. 表的第一个字段必须是 TIMESTAMP,并且系统自动将其设为主键; 2. 表名最大长度为 192; -3. 表的每行长度不能超过 16k 个字符;(注意:每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置) +3. 表的每行长度不能超过 48KB;(注意:每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置) 4. 子表名只能由字母、数字和下划线组成,且不能以数字开头,不区分大小写 5. 使用数据类型 binary 或 nchar,需指定其最长的字节数,如 binary(20),表示 20 字节; 6. 为了兼容支持更多形式的表名,TDengine 引入新的转义符 "\`",可以让表名与关键词不冲突,同时不受限于上述表名称合法性约束检查。但是同样具有长度限制要求。使用转义字符以后,不再对转义字符中的内容进行大小写统一。 diff --git a/docs-cn/12-taos-sql/04-stable.md b/docs-cn/12-taos-sql/04-stable.md index a3c227317c85917b64b2477994d335710610ec70..3901427736e80bc8dd0dd87b454947af6e586561 100644 --- a/docs-cn/12-taos-sql/04-stable.md +++ b/docs-cn/12-taos-sql/04-stable.md @@ -86,7 +86,7 @@ ALTER STABLE stb_name MODIFY COLUMN field_name data_type(length); ALTER STABLE stb_name ADD TAG new_tag_name tag_type; ``` -为 STable 增加一个新的标签,并指定新标签的类型。标签总数不能超过 128 个,总长度不超过 16k 个字符。 +为 STable 增加一个新的标签,并指定新标签的类型。标签总数不能超过 128 个,总长度不超过 16KB 。 ### 删除标签 diff --git a/docs-cn/12-taos-sql/05-insert.md b/docs-cn/12-taos-sql/05-insert.md index e542e442b78c9033ae37196f4913a7c67fb19d8b..04118303f3f6517d65d8ecbbe9fdeb774a3177b7 100644 --- a/docs-cn/12-taos-sql/05-insert.md +++ b/docs-cn/12-taos-sql/05-insert.md @@ -67,7 +67,7 @@ INSERT INTO d1001 VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07- 如果用户在写数据时并不确定某个表是否存在,此时可以在写入数据时使用自动建表语法来创建不存在的表,若该表已存在则不会建立新表。自动建表时,要求必须以超级表为模板,并写明数据表的 TAGS 取值。例如: ``` -INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32); +INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32); ``` 也可以在自动建表时,只是指定部分 TAGS 列的取值,未被指定的 TAGS 列将置为 NULL。例如: @@ -79,7 +79,7 @@ INSERT INTO d21001 USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:33. 
自动建表语法也支持在一条语句中向多个表插入记录。例如: ``` -INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33) +INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33) d21002 USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:34.255', 10.15, 217, 0.33) d21003 USING meters (groupId) TAGS (2) (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31); ``` @@ -108,13 +108,13 @@ INSERT INTO d1001 FILE '/tmp/csvfile.csv'; 从 2.1.5.0 版本开始,支持在插入来自 CSV 文件的数据时,以超级表为模板来自动创建不存在的数据表。例如: ``` -INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) FILE '/tmp/csvfile.csv'; +INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile.csv'; ``` 也可以在一条语句中向多个表以自动建表的方式插入记录。例如: ``` -INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) FILE '/tmp/csvfile_21001.csv' +INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile_21001.csv' d21002 USING meters (groupId) TAGS (2) FILE '/tmp/csvfile_21002.csv'; ``` @@ -137,7 +137,7 @@ Query OK, 1 row(s) in set (0.001029s) taos> SHOW TABLES; Query OK, 0 row(s) in set (0.000946s) -taos> INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2) VALUES('a'); +taos> INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('a'); DB error: invalid SQL: 'a' (invalid timestamp) (0.039494s) diff --git a/docs-cn/12-taos-sql/06-select.md b/docs-cn/12-taos-sql/06-select.md index 3a860119cfe664f9ac3b0ebd046b5f4f0a612118..92abc4344b7562842fae71a84fe0cb9a168596ed 100644 --- a/docs-cn/12-taos-sql/06-select.md +++ b/docs-cn/12-taos-sql/06-select.md @@ -40,15 +40,15 @@ Query OK, 3 row(s) in set (0.001165s) taos> SELECT * FROM meters; ts | current | voltage | phase | location | groupid | ===================================================================================================================================== - 2018-10-03 14:38:05.500 | 11.80000 | 221 | 0.28000 | Beijing.Haidian | 2 | - 2018-10-03 14:38:16.600 | 13.40000 | 223 | 0.29000 | Beijing.Haidian | 2 | - 2018-10-03 14:38:05.000 | 10.80000 | 223 | 0.29000 | Beijing.Haidian | 3 | - 2018-10-03 14:38:06.500 | 11.50000 | 221 | 0.35000 | Beijing.Haidian | 3 | - 2018-10-03 14:38:04.000 | 10.20000 | 220 | 0.23000 | Beijing.Chaoyang | 3 | - 2018-10-03 14:38:16.650 | 10.30000 | 218 | 0.25000 | Beijing.Chaoyang | 3 | - 2018-10-03 14:38:05.000 | 10.30000 | 219 | 0.31000 | Beijing.Chaoyang | 2 | - 2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 | Beijing.Chaoyang | 2 | - 2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 | Beijing.Chaoyang | 2 | + 2018-10-03 14:38:05.500 | 11.80000 | 221 | 0.28000 | California.LosAngeles | 2 | + 2018-10-03 14:38:16.600 | 13.40000 | 223 | 0.29000 | California.LosAngeles | 2 | + 2018-10-03 14:38:05.000 | 10.80000 | 223 | 0.29000 | California.LosAngeles | 3 | + 2018-10-03 14:38:06.500 | 11.50000 | 221 | 0.35000 | California.LosAngeles | 3 | + 2018-10-03 14:38:04.000 | 10.20000 | 220 | 0.23000 | California.SanFrancisco | 3 | + 2018-10-03 14:38:16.650 | 10.30000 | 218 | 0.25000 | California.SanFrancisco | 3 | + 2018-10-03 14:38:05.000 | 10.30000 | 219 | 0.31000 | California.SanFrancisco | 2 | + 2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 | California.SanFrancisco | 2 | + 2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 | California.SanFrancisco | 2 | Query OK, 9 row(s) in set (0.002022s) ``` @@ -104,8 
+104,8 @@ Query OK, 1 row(s) in set (0.000849s)
taos> SELECT location, groupid, current FROM d1001 LIMIT 2;
location | groupid | current |
======================================================================
- Beijing.Chaoyang | 2 | 10.30000 |
- Beijing.Chaoyang | 2 | 12.60000 |
+ California.SanFrancisco | 2 | 10.30000 |
+ California.SanFrancisco | 2 | 12.60000 |
Query OK, 2 row(s) in set (0.003112s)
```
@@ -284,10 +284,10 @@ SELECT COUNT(TBNAME) FROM meters;
taos> SELECT TBNAME, location FROM meters;
tbname | location |
==================================================================
- d1004 | Beijing.Haidian |
- d1003 | Beijing.Haidian |
- d1002 | Beijing.Chaoyang |
- d1001 | Beijing.Chaoyang |
+ d1004 | California.LosAngeles |
+ d1003 | California.LosAngeles |
+ d1002 | California.SanFrancisco |
+ d1001 | California.SanFrancisco |
Query OK, 4 row(s) in set (0.000881s)

taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2;
@@ -327,15 +327,15 @@ Query OK, 1 row(s) in set (0.001091s)
- <\> 算子也可以写为 != ,请注意,这个算子不能用于数据表第一列的 timestamp 字段。
- like 算子使用通配符字符串进行匹配检查。
-  - 在通配符字符串中:'%'(百分号)匹配 0 到任意个字符;'\_'(下划线)匹配单个任意 ASCII 字符。
-  - 如果希望匹配字符串中原本就带有的 \_(下划线)字符,那么可以在通配符字符串中写作 `\_`,也即加一个反斜线来进行转义。(从 2.2.0.0 版本开始支持)
-  - 通配符字符串最长不能超过 20 字节。(从 2.1.6.1 版本开始,通配符字符串的长度放宽到了 100 字节,并可以通过 taos.cfg 中的 maxWildCardsLength 参数来配置这一长度限制。但不建议使用太长的通配符字符串,将有可能严重影响 LIKE 操作的执行性能。)
+  - 在通配符字符串中:'%'(百分号)匹配 0 到任意个字符;'\_'(下划线)匹配单个任意 ASCII 字符。
+  - 如果希望匹配字符串中原本就带有的 \_(下划线)字符,那么可以在通配符字符串中写作 `\_`,也即加一个反斜线来进行转义。(从 2.2.0.0 版本开始支持)
+  - 通配符字符串最长不能超过 20 字节。(从 2.1.6.1 版本开始,通配符字符串的长度放宽到了 100 字节,并可以通过 taos.cfg 中的 maxWildCardsLength 参数来配置这一长度限制。但不建议使用太长的通配符字符串,将有可能严重影响 LIKE 操作的执行性能。)
- 同时进行多个字段的范围过滤,需要使用关键词 AND 来连接不同的查询条件,暂不支持 OR 连接的不同列之间的查询过滤条件。
-  - 从 2.3.0.0 版本开始,已支持完整的同一列和/或不同列间的 AND/OR 运算。
+  - 从 2.3.0.0 版本开始,已支持完整的同一列和/或不同列间的 AND/OR 运算。
- 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如: `((value > 20 AND value < 30) OR (value < 12))`。
-  - 从 2.3.0.0 版本开始,允许使用多个时间过滤条件,但首列时间戳的过滤运算结果只能包含一个区间。
+  - 从 2.3.0.0 版本开始,允许使用多个时间过滤条件,但首列时间戳的过滤运算结果只能包含一个区间。
- 从 2.0.17.0 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。
-- 从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('Beijing', 'Shanghai')`。说明:BOOL 类型写作 `{true, false}` 或 `{0, 1}` 均可,但不能写作 0、1 之外的整数;FLOAT 和 DOUBLE 类型会受到浮点数精度影响,集合内的值在精度范围内认为和数据行的值完全相等才能匹配成功;TIMESTAMP 类型支持非主键的列。
+- 从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('California.SanFrancisco', 'California.SanDiego')`。说明:BOOL 类型写作 `{true, false}` 或 `{0, 1}` 均可,但不能写作 0、1 之外的整数;FLOAT 和 DOUBLE 类型会受到浮点数精度影响,集合内的值在精度范围内认为和数据行的值完全相等才能匹配成功;TIMESTAMP 类型支持非主键的列。
- 从 2.3.0.0 版本开始,条件过滤开始支持正则表达式,关键字 match/nmatch,不区分大小写。
## 正则表达式过滤
@@ -380,7 +380,7 @@ WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;

:::note

-JOIN语句存在如下限制要求:
+JOIN 语句存在如下限制要求:

- 参与一条语句中 JOIN 操作的表/超级表最多可以有 10 个。
- 在包含 JOIN 操作的查询语句中不支持 FILL。
@@ -409,13 +409,13 @@ SELECT ... FROM (SELECT ... FROM ...)
...;
- 在内层和外层查询中,都支持普通的表间/超级表间 JOIN。内层查询的计算结果也可以再参与数据子表的 JOIN 操作。
- 目前内层查询、外层查询均不支持 UNION 操作。
- 内层查询支持的功能特性与非嵌套的查询语句能力是一致的。
-  - 内层查询的 ORDER BY 子句一般没有意义,建议避免这样的写法以免无谓的资源消耗。
+  - 内层查询的 ORDER BY 子句一般没有意义,建议避免这样的写法以免无谓的资源消耗。
- 与非嵌套的查询语句相比,外层查询所能支持的功能特性存在如下限制:
-  - 计算函数部分:
-  - 如果内层查询的结果数据未提供时间戳,那么计算过程依赖时间戳的函数在外层会无法正常工作。例如:TOP, BOTTOM, FIRST, LAST, DIFF。
-  - 计算过程需要两遍扫描的函数,在外层查询中无法正常工作。例如:此类函数包括:STDDEV, PERCENTILE。
-  - 外层查询中不支持 IN 算子,但在内层中可以使用。
-  - 外层查询不支持 GROUP BY。
+  - 计算函数部分:
+    - 如果内层查询的结果数据未提供时间戳,那么计算过程依赖时间戳的函数在外层会无法正常工作。例如:TOP, BOTTOM, FIRST, LAST, DIFF。
+    - 计算过程需要两遍扫描的函数,在外层查询中无法正常工作。例如:此类函数包括:STDDEV, PERCENTILE。
+  - 外层查询中不支持 IN 算子,但在内层中可以使用。
+  - 外层查询不支持 GROUP BY。

:::
diff --git a/docs-cn/12-taos-sql/07-function.md b/docs-cn/12-taos-sql/07-function.md
index f6e564419ddaa18931b0f0e0e4e7b5b3219a92f6..2349e6aa3c02eb62fba1fc7e4eef15e08e3924d1 100644
--- a/docs-cn/12-taos-sql/07-function.md
+++ b/docs-cn/12-taos-sql/07-function.md
@@ -261,6 +261,92 @@ taos> select hyperloglog(dbig) from shll;
Query OK, 1 row(s) in set (0.008388s)
+### HISTOGRAM
+
+```
+SELECT HISTOGRAM(field_name, bin_type, bin_description, normalized) FROM tb_name [WHERE clause];
+```
+
+**功能说明**:统计数据按照用户指定区间的分布。
+
+**返回结果类型**:如归一化参数 normalized 设置为 1,返回结果为双精度浮点类型 DOUBLE,否则为长整型 INT64。
+
+**应用字段**:数值型字段。
+
+**支持的版本**:2.6.0.0 及以后的版本。
+
+**适用于**:表和超级表。
+
+**说明**:
+1. bin_type 用户指定的分桶类型,有效输入类型为 "user_input"、"linear_bin"、"log_bin"。
+2. bin_description 描述如何生成分桶区间,针对三种桶类型,分别为以下描述格式(均为 JSON 格式字符串):
+   - "user_input": "[1, 3, 5, 7]"
+     用户指定 bin 的具体数值。
+
+   - "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}"
+     "start" 表示数据起始点,"width" 表示每次 bin 偏移量,"count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点跟终点,
+     生成区间为[-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf]。
+
+   - "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}"
+     "start" 表示数据起始点,"factor" 表示按指数递增的因子,"count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点跟终点,
+     生成区间为[-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf]。
+3. normalized 是否将返回结果归一化到 0~1 之间。有效输入为 0 和 1。
+
+**示例**:
+
+```mysql
+taos> SELECT HISTOGRAM(voltage, "user_input", "[1,3,5,7]", 1) FROM meters;
+ histogram(voltage, "user_input", "[1,3,5,7]", 1) |
+ =======================================================
+ {"lower_bin":1, "upper_bin":3, "count":0.333333} |
+ {"lower_bin":3, "upper_bin":5, "count":0.333333} |
+ {"lower_bin":5, "upper_bin":7, "count":0.333333} |
+ Query OK, 3 row(s) in set (0.004273s)
+
+taos> SELECT HISTOGRAM(voltage, 'linear_bin', '{"start": 1, "width": 3, "count": 3, "infinity": false}', 0) FROM meters;
+ histogram(voltage, 'linear_bin', '{"start": 1, "width": 3, " |
+ ===================================================================
+ {"lower_bin":1, "upper_bin":4, "count":3} |
+ {"lower_bin":4, "upper_bin":7, "count":3} |
+ {"lower_bin":7, "upper_bin":10, "count":3} |
+ Query OK, 3 row(s) in set (0.004887s)
+
+taos> SELECT HISTOGRAM(voltage, 'log_bin', '{"start": 1, "factor": 3, "count": 3, "infinity": true}', 0) FROM meters;
+ histogram(voltage, 'log_bin', '{"start": 1, "factor": 3, "count" |
+ ===================================================================
+ {"lower_bin":-inf, "upper_bin":1, "count":3} |
+ {"lower_bin":1, "upper_bin":3, "count":2} |
+ {"lower_bin":3, "upper_bin":9, "count":6} |
+ {"lower_bin":9, "upper_bin":27, "count":3} |
+ {"lower_bin":27, "upper_bin":inf, "count":1} |
+```
+
+### ELAPSED
+
+```mysql
+SELECT ELAPSED(field_name[, time_unit]) FROM { tb_name | stb_name } [WHERE clause] [INTERVAL(interval [, offset]) [SLIDING sliding]];
+```
+
+**功能说明**:elapsed 函数表达了统计周期内连续的时间长度,和 twa 函数配合使用可以计算统计曲线下的面积。在通过 INTERVAL 子句指定窗口的情况下,统计在给定时间范围内的每个窗口内有数据覆盖的时间范围;如果没有 INTERVAL 子句,则返回整个给定时间范围内的有数据覆盖的时间范围。注意,ELAPSED 返回的并不是时间范围的绝对值,而是绝对值除以 time_unit 所得到的单位个数。
+
+**返回结果类型**:Double
+
+**应用字段**:Timestamp 类型
+
+**支持的版本**:2.6.0.0 及以后的版本。
+
+**适用于**:表,超级表,嵌套查询的外层查询
+
+**说明**:
+- field_name 参数只能是表的第一列,即 timestamp 主键列。
+- 按 time_unit 参数指定的时间单位返回,最小是数据库的时间分辨率。time_unit 参数未指定时,以数据库的时间分辨率为时间单位。
+- 可以和 interval 组合使用,返回每个时间窗口的时间戳差值。需要特别注意的是,除第一个时间窗口和最后一个时间窗口外,中间窗口的时间戳差值均为窗口长度。
+- order by asc/desc 不影响差值的计算结果。
+- 对于超级表,需要和 group by tbname 子句组合使用,不可以直接使用。
+- 对于普通表,不支持和 group by 子句组合使用。
+- 对于嵌套查询,仅当内层查询会输出隐式时间戳列时有效。例如 select elapsed(ts) from (select diff(value) from sub1) 语句,diff 函数会让内层查询输出隐式时间戳列,此为主键列,可以用于 elapsed 函数的第一个参数。相反,例如 select elapsed(ts) from (select * from sub1) 语句,ts 列输出到外层时已经没有了主键列的含义,无法使用 elapsed 函数。此外,elapsed 函数作为一个与时间线强依赖的函数,形如 select elapsed(ts) from (select diff(value) from st group by tbname) 尽管会返回一条计算结果,但并无实际意义,这种用法后续也将被限制。
+- 不支持与 leastsquares、diff、derivative、top、bottom、last_row、interp 等函数混合使用。
+
## 选择函数

在使用所有的选择函数的时候,可以同时指定输出 ts 列或标签列(包括 tbname),这样就可以方便地知道被选出的值是源于哪个数据行的。

@@ -698,7 +784,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL
SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause];
```

-**功能说明**:返回跳过最后 offset_value 个,然后取连续 k 个记录,不忽略 NULL 值。offset_val 可以不输入。此时返回最后的 k 个记录。当有 offset_val 输入的情况下,该函数功能等效于 `order by ts desc LIMIT k OFFSET offset_val`。
+**功能说明**:返回跳过最后 offset_val 个,然后取连续 k 个记录,不忽略 NULL 值。offset_val 可以不输入。此时返回最后的 k 个记录。当有 offset_val 输入的情况下,该函数功能等效于 `order by ts desc LIMIT k OFFSET offset_val`。

**参数范围**:k: [1,100] offset_val: [0,100]。

@@ -1766,6 +1852,8 @@ SELECT TIMEDIFF(ts_val1 | datetime_string1 | ts_col1, ts_val2 | datetime_string2
1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天)。
- 如果时间单位 time_unit 未指定,返回的时间差值精度与当前 DATABASE 设置的时间精度一致。
+
+**支持的版本**:2.6.0.0 及以后的版本。
+
**示例**:

```sql
diff --git a/docs-cn/12-taos-sql/08-interval.md b/docs-cn/12-taos-sql/08-interval.md
index d62e11b0dbd0ba49ceedb3807e05361f060969b3..b0619ea5ce3759e9bca1234b76e2a16176511547 100644
--- a/docs-cn/12-taos-sql/08-interval.md
+++ b/docs-cn/12-taos-sql/08-interval.md
@@ -11,7 +11,7 @@ TDengine 支持按时间段窗口切分方式进行聚合结果查询,比如
INTERVAL 子句用于产生相等时间周期的窗口,SLIDING 用以指定窗口向前滑动的时间。每次执行的查询是一个时间窗口,时间窗口随着时间流动向前滑动。在定义连续查询的时候需要指定时间窗口(time window)大小和每次前向增量时间(forward sliding times)。如图,[t0s, t0e],[t1s, t1e],[t2s, t2e] 分别是执行三次连续查询的时间窗口范围,窗口的前向滑动的时间范围由 sliding time 标识。查询过滤、聚合等操作按照每个时间窗口为独立的单位执行。当 SLIDING 与 INTERVAL 相等的时候,滑动窗口即为翻转窗口。

-![时间窗口示意图](/img/sql/timewindow-1.png)
+![TDengine Database 时间窗口示意图](./timewindow-1.webp)

INTERVAL 和 SLIDING 子句需要配合聚合和选择函数来使用。以下 SQL 语句非法:

@@ -33,7 +33,7 @@ _ 从 2.1.5.0 版本开始,INTERVAL 语句允许的最短时间间隔调整为
使用整数(布尔值)或字符串来标识产生记录时候设备的状态量。产生的记录如果具有相同的状态量数值则归属于同一个状态窗口,数值改变后该窗口关闭。如下图所示,根据状态量确定的状态窗口分别是[2019-04-28 14:22:07,2019-04-28 14:22:10]和[2019-04-28 14:22:11,2019-04-28 14:22:12]两个。(状态窗口暂不支持对超级表使用)

-![时间窗口示意图](/img/sql/timewindow-3.png)
+![TDengine Database 时间窗口示意图](./timewindow-3.webp)

使用 STATE_WINDOW 来确定状态窗口划分的列。例如:

@@ -45,7 +45,7 @@ SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status);

会话窗口根据记录的时间戳主键的值来确定是否属于同一个会话。如下图所示,如果设置时间戳的连续的间隔小于等于 12 秒,则以下 6 条记录构成 2 个会话窗口,分别是:[2019-04-28 14:22:10,2019-04-28 14:22:30]和[2019-04-28 14:23:10,2019-04-28 14:23:30]。因为 2019-04-28 14:22:30 与 2019-04-28 14:23:10 之间的时间间隔是 40 秒,超过了连续时间间隔(12 秒)。

-![时间窗口示意图](/img/sql/timewindow-2.png)
+![TDengine Database 时间窗口示意图](./timewindow-2.webp)

在 tol_value 时间间隔范围内的结果都认为归属于同一个窗口,如果连续的两条记录的时间超过 tol_val,则自动开启下一个窗口。(会话窗口暂不支持对超级表使用)
diff --git a/docs-cn/12-taos-sql/09-limit.md b/docs-cn/12-taos-sql/09-limit.md
index 3c86a3862174377e6a00d046fb69627c773fe76e..7673e24a83cc1ba5335b11f29803cf9f3eae26e5 100644
--- a/docs-cn/12-taos-sql/09-limit.md
+++ b/docs-cn/12-taos-sql/09-limit.md
@@ -7,9 +7,9 @@ title: 边界限制

- 数据库名最大长度为 32。
- 表名最大长度为 192,不包括数据库名前缀和分隔符
-- 每行数据最大长度 16k 个字符, 从 2.1.7.0 版本开始,每行数据最大长度 48k 个字符(注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)。
+- 每行数据最大长度 48KB(注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)。
- 列名最大长度为 64,最多允许 4096 列,最少需要 2 列,第一列必须是时间戳。注:从 2.1.7.0 版本(不含)以前最多允许 4096 列
-- 标签名最大长度为 64,最多允许 128 个,至少要有 1 个标签,一个表中标签值的总长度不超过 16k 个字符。
+- 标签名最大长度为 64,最多允许 128 个,至少要有 1 个标签,一个表中标签值的总长度不超过 16KB。
- SQL 语句最大长度 1048576 个字符,也可通过客户端配置参数 maxSQLLength 修改,取值范围 65480 ~ 1048576。
- SELECT 语句的查询结果,最多允许返回 4096 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。注: 2.1.7.0 版本(不含)之前为最多允许 1024 列
- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制。
diff --git a/docs-cn/12-taos-sql/12-keywords/index.md b/docs-cn/12-taos-sql/12-keywords/index.md
index 608d4e080967cfd97072706cf0963ae669960be6..0b9ec4de862fc6b6ade11e733a0f7b169a79a324 100644
--- a/docs-cn/12-taos-sql/12-keywords/index.md
+++ b/docs-cn/12-taos-sql/12-keywords/index.md
@@ -23,17 +23,17 @@ title: TDengine 参数限制与保留关键字
去掉了 `` ‘“`\ `` (单双引号、撇号、反斜杠、空格)

- 数据库名:不能包含“.”以及特殊字符,不能超过 32 个字符
-- 表名:不能包含“.”以及特殊字符,与所属数据库名一起,不能超过 192 个字符,每行数据最大长度 16k 个字符
-- 表的列名:不能包含特殊字符,不能超过 64 个字符
+- 表名:不能包含“.”以及特殊字符,与所属数据库名一起,不能超过 192 个字节,每行数据最大长度 48KB
+- 表的列名:不能包含特殊字符,不能超过 64 个字节
- 数据库名、表名、列名,都不能以数字开头,合法的可用字符集是“英文字符、数字和下划线”
- 表的列数:不能超过 1024 列,最少需要 2 列,第一列必须是时间戳(从 2.1.7.0 版本开始,改为最多支持 4096 列)
-- 记录的最大长度:包括时间戳 8 byte,不能超过 16KB(每个 BINARY/NCHAR 类型的列还会额外占用 2 个 byte 的存储位置)
-- 单条 SQL 语句默认最大字符串长度:1048576 byte,但可通过系统配置参数 maxSQLLength 修改,取值范围 65480 ~ 1048576 byte
+- 记录的最大长度:包括时间戳 8 字节,不能超过 48KB(每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)
+- 单条 SQL 语句默认最大字符串长度:1048576 字节,但可通过系统配置参数 maxSQLLength 修改,取值范围 65480 ~ 1048576 字节
- 数据库副本数:不能超过 3
-- 用户名:不能超过 23 个 byte
-- 用户密码:不能超过 15 个 byte
+- 用户名:不能超过 23 个字节
+- 用户密码:不能超过 15 个字节
- 标签(Tags)数量:不能超过 128 个,可以 0 个
-- 标签的总长度:不能超过 16K byte
+- 标签的总长度:不能超过 16KB
- 记录条数:仅受存储空间限制
- 表的个数:仅受节点个数限制
- 库的个数:仅受节点个数限制
@@ -85,3 +85,44 @@ title: TDengine 参数限制与保留关键字
| CONNECTIONS | HAVING | NOT | SOFFSET | VNODES |
| CONNS | ID | NOTNULL | STABLE | WAL |
| COPY | IF | NOW | STABLES | WHERE |
+| _C0 | _QSTART | _QSTOP | _QDURATION | _WSTART |
+| _WSTOP | _WDURATION |
+
+## 特殊说明
+### TBNAME
+`TBNAME` 可以视为超级表中一个特殊的标签,代表子表的表名。
+
+获取一个超级表所有的子表名及相关的标签信息:
+```mysql
+SELECT TBNAME, location FROM meters;
+```
+
+统计超级表下辖子表数量:
+```mysql
+SELECT COUNT(TBNAME) FROM meters;
+```
+
+以上两个查询均只支持在 WHERE 条件子句中添加针对标签(TAGS)的过滤条件。例如:
+```mysql
+taos> SELECT TBNAME, location FROM meters;
+ tbname | location |
+==================================================================
+ d1004 | California.SanFrancisco |
+ d1003 | California.SanFrancisco |
+ d1002 | California.LosAngeles |
+ d1001 | California.LosAngeles |
+Query OK, 4 row(s) in set (0.000881s)
+
+taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2;
+ count(tbname) |
+========================
+ 2 |
+Query OK, 1 row(s) in set (0.001091s)
+```
+### _QSTART/_QSTOP/_QDURATION
+表示查询过滤窗口的起始,结束以及持续时间(从 2.6.0.0 版本开始支持)
+
+### _WSTART/_WSTOP/_WDURATION
+窗口切分聚合查询(例如 interval/session window/state window)中表示每个切分窗口的起始,结束以及持续时间(从 2.6.0.0 版本开始支持)
+
+### _c0
+表示表或超级表的第一列
\ No newline at end of file
diff --git a/docs-cn/12-taos-sql/index.md b/docs-cn/12-taos-sql/index.md
index 269bc1d2b5ddfa25c42652d8f639bfe2fb1d42e5..cb01b3a918778abc6c7891c1ff185f1db32d3d36 100644
--- a/docs-cn/12-taos-sql/index.md
+++ b/docs-cn/12-taos-sql/index.md
@@ -7,8 +7,6 @@ description: "TAOS SQL 支持的语法规则、主要查询功能、支持的 SQ

TAOS SQL 是用户对 TDengine 进行数据写入和查询的主要工具。TAOS SQL 为了便于用户快速上手,在一定程度上提供与标准 SQL 类似的风格和模式。严格意义上,TAOS SQL 并不是也不试图提供标准的 SQL 语法。此外,由于 TDengine 针对的时序性结构化数据不提供删除功能,因此在 TAOS SQL 中不提供数据删除的相关功能。

-TAOS SQL 不支持关键字的缩写,例如 DESCRIBE 不能缩写为 DESC。
-
本章节 SQL 语法遵循如下约定:

- <\> 里的内容是用户需要输入的,但不要输入 <\> 本身
@@ -37,4 +35,4 @@ import DocCardList from '@theme/DocCardList';

import {useCurrentSidebarCategory} from '@docusaurus/theme-common';

-```
\ No newline at end of file
+```
diff --git a/docs-cn/12-taos-sql/timewindow-1.webp b/docs-cn/12-taos-sql/timewindow-1.webp
new file mode 100644
index 0000000000000000000000000000000000000000..82747558e96df752a0010d85be79a4af07e4a1df
Binary files /dev/null and b/docs-cn/12-taos-sql/timewindow-1.webp differ
diff --git a/docs-cn/12-taos-sql/timewindow-2.webp b/docs-cn/12-taos-sql/timewindow-2.webp
new file mode 100644
index 0000000000000000000000000000000000000000..8f1314ae34f7f5c5cca1d3cb80455f555fad38c3
Binary files /dev/null and b/docs-cn/12-taos-sql/timewindow-2.webp differ
diff --git a/docs-cn/12-taos-sql/timewindow-3.webp b/docs-cn/12-taos-sql/timewindow-3.webp
new file mode 100644
index 0000000000000000000000000000000000000000..5bd16e68e7fd5da6805551e9765975277cd5d4d9
Binary files /dev/null and b/docs-cn/12-taos-sql/timewindow-3.webp differ
diff --git a/docs-cn/13-operation/11-optimize.md b/docs-cn/13-operation/11-optimize.md
index 1ca9e8c44492a5882613a0b55d959d7abca8b5f6..d06c3cb8f5601a241fd63d73ef1a5a6165eb1617 100644
--- a/docs-cn/13-operation/11-optimize.md
+++ b/docs-cn/13-operation/11-optimize.md
@@ -74,7 +74,7 @@ TDengine 集群中加入一个新的 dnode 时,涉及集群相关的一些参
- offlineThreshold: dnode 离线阈值,超过该时间将导致该 dnode 从集群中删除。单位为秒,默认值:86400\*10(即 10 天)。
- statusInterval: dnode 向 mnode 报告状态时长。单位为秒,默认值:1。
- maxTablesPerVnode: 每个 vnode 中能够创建的最大表个数。默认值:1000000。
-- maxVgroupsPerDb: 每个数据库中能够使用的最大 vgroup 个数。
+- maxVgroupsPerDb: 每个数据库中能够使用的最大 vgroup 个数。0:自动配置为 CPU 的核数。默认值:0。
- arbitrator: 
系统中裁决器的 endpoint,缺省为空。 - timezone、locale、charset 的配置见客户端配置。(2.0.20.0 及以上的版本里,集群中加入新节点已不要求 locale 和 charset 参数取值一致) - balance:是否启用负载均衡。0:否,1:是。默认值:1。 diff --git a/docs-cn/13-operation/index.md b/docs-cn/13-operation/index.md index d265ea8ef3f1d18a8ab743fc66cd6dbc05dc60c6..bc06fbdc138ee593c1206475095ef48d32493b37 100644 --- a/docs-cn/13-operation/index.md +++ b/docs-cn/13-operation/index.md @@ -2,9 +2,11 @@ title: 运维指南 --- +本章节主要为系统管理员写的,覆盖安装、下载、数据导入、导出、运行系统的监测、用户管理、连接管理等内容,同时介绍根据业务量,如何做容量规划,系统运行一段时间后,如何做系统优化。 + ```mdx-code-block import DocCardList from '@theme/DocCardList'; import {useCurrentSidebarCategory} from '@docusaurus/theme-common'; -``` \ No newline at end of file +``` diff --git a/docs-cn/14-reference/03-connector/03-connector.mdx b/docs-cn/14-reference/03-connector/03-connector.mdx index c0e714f148a7821e070be38a5484484fdd747e9a..7a4a85276ef4bb4ab829250fcf67076962dbb871 100644 --- a/docs-cn/14-reference/03-connector/03-connector.mdx +++ b/docs-cn/14-reference/03-connector/03-connector.mdx @@ -4,7 +4,7 @@ title: 连接器 TDengine 提供了丰富的应用程序开发接口,为了便于用户快速开发自己的应用,TDengine 支持了多种编程语言的连接器,其中官方连接器包括支持 C/C++、Java、Python、Go、Node.js、C# 和 Rust 的连接器。这些连接器支持使用原生接口(taosc)和 REST 接口(部分语言暂不支持)连接 TDengine 集群。社区开发者也贡献了多个非官方连接器,例如 ADO.NET 连接器、Lua 连接器和 PHP 连接器。 -![image-connector](/img/connector.png) +![TDengine Database connector architecture](./connector.webp) ## 支持的平台 diff --git a/docs-cn/14-reference/03-connector/connector.webp b/docs-cn/14-reference/03-connector/connector.webp new file mode 100644 index 0000000000000000000000000000000000000000..040cf5c26c726b345b2e0e5363dd3c677bec61be Binary files /dev/null and b/docs-cn/14-reference/03-connector/connector.webp differ diff --git a/docs-cn/14-reference/03-connector/csharp.mdx b/docs-cn/14-reference/03-connector/csharp.mdx index bbefaacb459153ab5116d557fdf1940d487b4bd3..1e23df9286bf0cb3bf1db95e334301c04d01ad04 100644 --- a/docs-cn/14-reference/03-connector/csharp.mdx +++ b/docs-cn/14-reference/03-connector/csharp.mdx @@ -9,16 +9,16 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; import Preparition from "./_preparition.mdx" -import CSInsert from "../../04-develop/03-insert-data/_cs_sql.mdx" -import CSInfluxLine from "../../04-develop/03-insert-data/_cs_line.mdx" -import CSOpenTSDBTelnet from "../../04-develop/03-insert-data/_cs_opts_telnet.mdx" -import CSOpenTSDBJson from "../../04-develop/03-insert-data/_cs_opts_json.mdx" -import CSQuery from "../../04-develop/04-query-data/_cs.mdx" -import CSAsyncQuery from "../../04-develop/04-query-data/_cs_async.mdx" +import CSInsert from "../../07-develop/03-insert-data/_cs_sql.mdx" +import CSInfluxLine from "../../07-develop/03-insert-data/_cs_line.mdx" +import CSOpenTSDBTelnet from "../../07-develop/03-insert-data/_cs_opts_telnet.mdx" +import CSOpenTSDBJson from "../../07-develop/03-insert-data/_cs_opts_json.mdx" +import CSQuery from "../../07-develop/04-query-data/_cs.mdx" +import CSAsyncQuery from "../../07-develop/04-query-data/_cs_async.mdx" `TDengine.Connector` 是 TDengine 提供的 C# 语言连接器。C# 开发人员可以通过它开发存取 TDengine 集群数据的 C# 应用软件。 -`TDengine.Connector` 连接器支持通过 TDengine 客户端驱动(taosc)建立与 TDengine 运行实例的连接,提供数据写入、查询、订阅、schemaless 数据写入、参数绑定接口数据写入等功能 `TDengine.Connector` 目前暂未提供 REST 连接方式,用户可以参考 [RESTful APIs](https://docs.taosdata.com//reference/restful-api/) 文档自行编写。 +`TDengine.Connector` 连接器支持通过 TDengine 客户端驱动(taosc)建立与 TDengine 运行实例的连接,提供数据写入、查询、订阅、schemaless 数据写入、参数绑定接口数据写入等功能 `TDengine.Connector` 目前暂未提供 REST 连接方式,用户可以参考 [REST API](/reference/rest-api/) 文档自行编写。 本文介绍如何在 Linux 或 Windows 环境中安装 
`TDengine.Connector`,并通过 `TDengine.Connector` 连接 TDengine 集群,进行数据写入、查询等基本操作。 diff --git a/docs-cn/14-reference/03-connector/go.mdx b/docs-cn/14-reference/03-connector/go.mdx index 694dfc2510ca668391dc735bbe99812645c2d7b0..88b09aa5d0b0161973e3e7eabb4cf04357c134f3 100644 --- a/docs-cn/14-reference/03-connector/go.mdx +++ b/docs-cn/14-reference/03-connector/go.mdx @@ -9,11 +9,11 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; import Preparition from "./_preparition.mdx" -import GoInsert from "../../04-develop/03-insert-data/_go_sql.mdx" -import GoInfluxLine from "../../04-develop/03-insert-data/_go_line.mdx" -import GoOpenTSDBTelnet from "../../04-develop/03-insert-data/_go_opts_telnet.mdx" -import GoOpenTSDBJson from "../../04-develop/03-insert-data/_go_opts_json.mdx" -import GoQuery from "../../04-develop/04-query-data/_go.mdx" +import GoInsert from "../../07-develop/03-insert-data/_go_sql.mdx" +import GoInfluxLine from "../../07-develop/03-insert-data/_go_line.mdx" +import GoOpenTSDBTelnet from "../../07-develop/03-insert-data/_go_opts_telnet.mdx" +import GoOpenTSDBJson from "../../07-develop/03-insert-data/_go_opts_json.mdx" +import GoQuery from "../../07-develop/04-query-data/_go.mdx" `driver-go` 是 TDengine 的官方 Go 语言连接器,实现了 Go 语言[ database/sql ](https://golang.org/pkg/database/sql/) 包的接口。Go 开发人员可以通过它开发存取 TDengine 集群数据的应用软件。 diff --git a/docs-cn/14-reference/03-connector/java.mdx b/docs-cn/14-reference/03-connector/java.mdx index 55abf84fd50fe1c4b5b6a07b28731a00d4534a05..267757160634b28ab198ae0fd759188cf4ccc5cc 100644 --- a/docs-cn/14-reference/03-connector/java.mdx +++ b/docs-cn/14-reference/03-connector/java.mdx @@ -11,7 +11,7 @@ import TabItem from '@theme/TabItem'; `taos-jdbcdriver` 是 TDengine 的官方 Java 语言连接器,Java 开发人员可以通过它开发存取 TDengine 数据库的应用软件。`taos-jdbcdriver` 实现了 JDBC driver 标准的接口,并提供两种形式的连接器。一种是通过 TDengine 客户端驱动程序(taosc)原生连接 TDengine 实例,支持数据写入、查询、订阅、schemaless 接口和参数绑定接口等功能,一种是通过 taosAdapter 提供的 REST 接口连接 TDengine 实例(2.4.0.0 及更高版本)。REST 连接实现的功能集合和原生连接有少量不同。 -![tdengine-connector](tdengine-jdbc-connector.png) +![TDengine Database Connector Java](tdengine-jdbc-connector.webp) 上图显示了两种 Java 应用使用连接器访问 TDengine 的两种方式: @@ -208,10 +208,10 @@ url 中的配置参数如下: - 与原生连接方式不同,REST 接口是无状态的。在使用 JDBC REST 连接时,需要在 SQL 中指定表、超级表的数据库名称。例如: ```sql -INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('beijing') VALUES(now, 24.6); +INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFrancisco') VALUES(now, 24.6); ``` -- 从 taos-jdbcdriver-2.0.36 和 TDengine 2.2.0.0 版本开始,如果在 url 中指定了 dbname,那么,JDBC REST 连接会默认使用/rest/sql/dbname 作为 restful 请求的 url,在 SQL 中不需要指定 dbname。例如:url 为 jdbc:TAOS-RS://127.0.0.1:6041/test,那么,可以执行 sql:insert into t1 using weather(ts, temperature) tags('beijing') values(now, 24.6); +- 从 taos-jdbcdriver-2.0.36 和 TDengine 2.2.0.0 版本开始,如果在 url 中指定了 dbname,那么,JDBC REST 连接会默认使用/rest/sql/dbname 作为 restful 请求的 url,在 SQL 中不需要指定 dbname。例如:url 为 jdbc:TAOS-RS://127.0.0.1:6041/test,那么,可以执行 sql:insert into t1 using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6); ::: @@ -563,7 +563,7 @@ public class ParameterBindingDemo { // set table name pstmt.setTableName("t5_" + i); // set tags - pstmt.setTagNString(0, "北京-abc"); + pstmt.setTagNString(0, "California.SanFrancisco"); // set columns ArrayList tsList = new ArrayList<>(); @@ -574,7 +574,7 @@ public class ParameterBindingDemo { ArrayList f1List = new ArrayList<>(); for (int j = 0; j < numOfRow; j++) { - f1List.add("北京-abc"); + f1List.add("California.LosAngeles"); } 
pstmt.setNString(1, f1List, BINARY_COLUMN_SIZE); @@ -633,7 +633,7 @@ public class SchemalessInsertTest { private static final String host = "127.0.0.1"; private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000"; private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0"; - private static final String jsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1346846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"Beijing\", \"id\": \"d1001\"}}"; + private static final String jsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1346846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}"; public static void main(String[] args) throws SQLException { final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; diff --git a/docs-cn/14-reference/03-connector/node.mdx b/docs-cn/14-reference/03-connector/node.mdx index 0afcf2457dfdb11c01657abd983601322899b8fb..9f2bed9e97cb33aeabfce3d69dc3774931b426c0 100644 --- a/docs-cn/14-reference/03-connector/node.mdx +++ b/docs-cn/14-reference/03-connector/node.mdx @@ -9,12 +9,11 @@ import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; import Preparition from "./_preparition.mdx"; -import NodeInsert from "../../04-develop/03-insert-data/_js_sql.mdx"; -import NodeInfluxLine from "../../04-develop/03-insert-data/_js_line.mdx"; -import NodeOpenTSDBTelnet from "../../04-develop/03-insert-data/_js_opts_telnet.mdx"; -import NodeOpenTSDBJson from "../../04-develop/03-insert-data/_js_opts_json.mdx"; -import NodeQuery from "../../04-develop/04-query-data/_js.mdx"; -import NodeAsyncQuery from "../../04-develop/04-query-data/_js_async.mdx"; +import NodeInsert from "../../07-develop/03-insert-data/_js_sql.mdx"; +import NodeInfluxLine from "../../07-develop/03-insert-data/_js_line.mdx"; +import NodeOpenTSDBTelnet from "../../07-develop/03-insert-data/_js_opts_telnet.mdx"; +import NodeOpenTSDBJson from "../../07-develop/03-insert-data/_js_opts_json.mdx"; +import NodeQuery from "../../07-develop/04-query-data/_js.mdx"; `td2.0-connector` 和 `td2.0-rest-connector` 是 TDengine 的官方 Node.js 语言连接器。Node.js 开发人员可以通过它开发可以存取 TDengine 集群数据的应用软件。 @@ -189,14 +188,8 @@ let cursor = conn.cursor(); ### 查询数据 -#### 同步查询 - -#### 异步查询 - - - ## 更多示例程序 | 示例程序 | 示例程序描述 | diff --git a/docs-cn/14-reference/03-connector/rust.mdx b/docs-cn/14-reference/03-connector/rust.mdx index b6aac45c6ab30405190ab3ced39de017033e760a..25a8409b6e6faca651d1eaf3e02fbd4a0199c557 100644 --- a/docs-cn/14-reference/03-connector/rust.mdx +++ b/docs-cn/14-reference/03-connector/rust.mdx @@ -9,11 +9,11 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; import Preparition from "./_preparition.mdx" -import RustInsert from "../../04-develop/03-insert-data/_rust_sql.mdx" -import RustInfluxLine from "../../04-develop/03-insert-data/_rust_line.mdx" -import RustOpenTSDBTelnet from "../../04-develop/03-insert-data/_rust_opts_telnet.mdx" -import RustOpenTSDBJson from "../../04-develop/03-insert-data/_rust_opts_json.mdx" -import RustQuery from "../../04-develop/04-query-data/_rust.mdx" +import RustInsert from "../../07-develop/03-insert-data/_rust_sql.mdx" +import RustInfluxLine from "../../07-develop/03-insert-data/_rust_line.mdx" +import RustOpenTSDBTelnet from "../../07-develop/03-insert-data/_rust_opts_telnet.mdx" +import RustOpenTSDBJson from "../../07-develop/03-insert-data/_rust_opts_json.mdx" 
+import RustQuery from "../../07-develop/04-query-data/_rust.mdx" [![Crates.io](https://img.shields.io/crates/v/libtaos)](https://crates.io/crates/libtaos) ![Crates.io](https://img.shields.io/crates/d/libtaos) [![docs.rs](https://img.shields.io/docsrs/libtaos)](https://docs.rs/libtaos) diff --git a/docs-cn/14-reference/03-connector/tdengine-jdbc-connector.png b/docs-cn/14-reference/03-connector/tdengine-jdbc-connector.png deleted file mode 100644 index 1cb8401ea30b01d8db652ed4ea70ecc511de7461..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/03-connector/tdengine-jdbc-connector.png and /dev/null differ diff --git a/docs-cn/14-reference/03-connector/tdengine-jdbc-connector.webp b/docs-cn/14-reference/03-connector/tdengine-jdbc-connector.webp new file mode 100644 index 0000000000000000000000000000000000000000..0956d6005ffc5e90727d49d7566158affdda09c2 Binary files /dev/null and b/docs-cn/14-reference/03-connector/tdengine-jdbc-connector.webp differ diff --git a/docs-cn/14-reference/04-taosadapter.md b/docs-cn/14-reference/04-taosadapter.md index 90a31ec94c94559311e2c91cd34f75af7e87e9a0..6e259391d40acfd48d8db8db3246ad2196ce0520 100644 --- a/docs-cn/14-reference/04-taosadapter.md +++ b/docs-cn/14-reference/04-taosadapter.md @@ -24,7 +24,7 @@ taosAdapter 提供以下功能: ## taosAdapter 架构图 -![taosAdapter Architecture](taosAdapter-architecture.png) +![TDengine Database taosAdapter Architecture](taosAdapter-architecture.webp) ## taosAdapter 部署方法 diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.png b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.png deleted file mode 100644 index 4708f836feb21980f2db7fed4a55f799b23a6ec1..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp new file mode 100644 index 0000000000000000000000000000000000000000..a78e18028a94c2f6a783b08d992a25c791527407 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.png b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.png deleted file mode 100644 index f2684e6eed70e8f56697eae42b495d6bd62815e8..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp new file mode 100644 index 0000000000000000000000000000000000000000..b152418d0902b8ebdf62ebce6705c10dd5ab4fbf Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.png b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.png deleted file mode 100644 index 74686691e4106b8646c3deee1e0ce73b2f53f1ea..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp new file mode 100644 index 
0000000000000000000000000000000000000000..f58f48b7f17375cb8e62e7c0126ca3aea56a13f6 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-4-requests.png b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-4-requests.png deleted file mode 100644 index 27964215567f9f961c0aeaf1b863188437008fb7..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-4-requests.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp new file mode 100644 index 0000000000000000000000000000000000000000..00afcce013602dce0da17bfd033f65aaa8e43bb7 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-5-database.png b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-5-database.png deleted file mode 100644 index b0d3abbf21ec4d4bd7bfb95fcc03a5f936b22665..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-5-database.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-5-database.webp b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-5-database.webp new file mode 100644 index 0000000000000000000000000000000000000000..567e5694f9d7a035a3eb354493d3df8ed64db251 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-5-database.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.png b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.png deleted file mode 100644 index 2b54cbeb83bcff12f20461a4f57f882e2073f231..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.webp b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.webp new file mode 100644 index 0000000000000000000000000000000000000000..cc8a912810f35e53a6e5fa96ea0c81e334ffc0df Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-7-login-history.png b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-7-login-history.png deleted file mode 100644 index eb3848657f13900c856ac595c20766465157e9c4..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-7-login-history.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-7-login-history.webp b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-7-login-history.webp new file mode 100644 index 0000000000000000000000000000000000000000..651b716bc511ba2ed5db5e6fc6b0591ef150cbf6 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-7-login-history.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.png b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.png deleted file mode 100644 index d94b2e02ac9855bb3d2f77d8902e068839db364f..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.png and /dev/null differ diff --git 
a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp new file mode 100644 index 0000000000000000000000000000000000000000..8666193f59497180574fd2786266e5baabbe9761 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-full.png b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-full.png deleted file mode 100644 index 654df2934597ce600a1dc2dcd0cab7e29de7076d..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-full.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/TDinsight-full.webp b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-full.webp new file mode 100644 index 0000000000000000000000000000000000000000..7f38a76a2b899ffebc7aecd39c8ec4fd0b2da778 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/TDinsight-full.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-manager-status.png b/docs-cn/14-reference/07-tdinsight/assets/alert-manager-status.png deleted file mode 100644 index e3afa22c0326d70567ec4529c83101c746daac87..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/alert-manager-status.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-manager-status.webp b/docs-cn/14-reference/07-tdinsight/assets/alert-manager-status.webp new file mode 100644 index 0000000000000000000000000000000000000000..3d7fe932a23f3720e76e4217a7b5d1868d81fac8 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/alert-manager-status.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-notification-channel.png b/docs-cn/14-reference/07-tdinsight/assets/alert-notification-channel.png deleted file mode 100644 index 198bf37141c86a66cdd91b47a331bcdeb83daaf8..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/alert-notification-channel.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-notification-channel.webp b/docs-cn/14-reference/07-tdinsight/assets/alert-notification-channel.webp new file mode 100644 index 0000000000000000000000000000000000000000..517123954efe4b94485fdab2e07be0d765f5daa2 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/alert-notification-channel.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-query-demo.png b/docs-cn/14-reference/07-tdinsight/assets/alert-query-demo.png deleted file mode 100644 index ace3aa3c2f8f14fabdac54bc25ae2d9449445b69..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/alert-query-demo.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-query-demo.webp b/docs-cn/14-reference/07-tdinsight/assets/alert-query-demo.webp new file mode 100644 index 0000000000000000000000000000000000000000..6666296ac16e7a0c0ab3db23f0517f2089d09035 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/alert-query-demo.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.png b/docs-cn/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.png deleted file mode 100644 index 7082e49f6beb8690c36f98a3f4ff2befdb8fd014..0000000000000000000000000000000000000000 Binary files 
a/docs-cn/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.webp b/docs-cn/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.webp new file mode 100644 index 0000000000000000000000000000000000000000..6f74bc3a47a32de661ef25f787a947d823715810 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-rule-test.png b/docs-cn/14-reference/07-tdinsight/assets/alert-rule-test.png deleted file mode 100644 index ffd4911b53854c42dbf0ff11838cb604fa694138..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/alert-rule-test.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/alert-rule-test.webp b/docs-cn/14-reference/07-tdinsight/assets/alert-rule-test.webp new file mode 100644 index 0000000000000000000000000000000000000000..acda3b24a6263815ac8b658709d2172300ca3b00 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/alert-rule-test.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-button.png b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-button.png deleted file mode 100644 index 802c7366f921301bd7fbc62458e56b2d1eaf195c..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-button.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-button.webp b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-button.webp new file mode 100644 index 0000000000000000000000000000000000000000..903e236e2a776dfef7f85c014662e8913a9033a5 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-button.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.png b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.png deleted file mode 100644 index 019ec921b6f808671f4f864ddf3380159d4a0dcc..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.webp b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.webp new file mode 100644 index 0000000000000000000000000000000000000000..14fcfe9d183e8804199708ae4492d0904a7c9d62 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-test.png b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-test.png deleted file mode 100644 index 3963abb4ea8ae0e6f5557466f7a5b746c2d2ea3c..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-test.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-test.webp b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-test.webp new file mode 100644 index 0000000000000000000000000000000000000000..00b50cc619b030d1fb2be3a367183901d5c833e8 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource-test.webp differ diff --git 
a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource.png b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource.png deleted file mode 100644 index 837100464b35a5cafac474723aef603f91945ebc..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource.webp b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource.webp new file mode 100644 index 0000000000000000000000000000000000000000..06d0ff6ed50091a6340508bc5b2b3f78b65dcb18 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/howto-add-datasource.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-display.png b/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-display.png deleted file mode 100644 index 98223df25499effac343ff5723544a3c289f18fa..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-display.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-display.webp b/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-display.webp new file mode 100644 index 0000000000000000000000000000000000000000..e2ec052b91e439a817f6e88b8afd0fcb4dcb7ef8 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-display.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-import-options.png b/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-import-options.png deleted file mode 100644 index 07aba348f02b4fb8ef68e79664920c119b842d4c..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-import-options.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-import-options.webp b/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-import-options.webp new file mode 100644 index 0000000000000000000000000000000000000000..665c035f9755b9472aee33cd61d3ab52831194b5 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/howto-dashboard-import-options.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-import-dashboard.png b/docs-cn/14-reference/07-tdinsight/assets/howto-import-dashboard.png deleted file mode 100644 index 7e28939ead8bf3b6e2b4330e4f9b59c2e39b5c1c..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/howto-import-dashboard.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/howto-import-dashboard.webp b/docs-cn/14-reference/07-tdinsight/assets/howto-import-dashboard.webp new file mode 100644 index 0000000000000000000000000000000000000000..7dc42eeba919fee7b438a453c00bb9fd0ac2d274 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/howto-import-dashboard.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-15167.png b/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-15167.png deleted file mode 100644 index 981f640b14d18aa6f0682768d8405a232df500f6..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-15167.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-15167.webp b/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-15167.webp new file mode 100644 index 
0000000000000000000000000000000000000000..7ef081900f8de99c859193b69d49b3d6bc187909 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-15167.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.png b/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.png deleted file mode 100644 index 94ef4fa5fe63e535118a81707b413c028ce01f70..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.webp b/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.webp new file mode 100644 index 0000000000000000000000000000000000000000..602452fc4c89424d8e17d46d74949b69be84dbe8 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.png b/docs-cn/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.png deleted file mode 100644 index 670cacc377c2801fa9437c3c132c5c7fbc361b0f..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.webp b/docs-cn/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.webp new file mode 100644 index 0000000000000000000000000000000000000000..35a3ebba781f24dbb0066993d1ca2f02659997d2 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/import_dashboard.png b/docs-cn/14-reference/07-tdinsight/assets/import_dashboard.png deleted file mode 100644 index d74cd36c96ee0fd24ddc6feae2da07824816f745..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/import_dashboard.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/import_dashboard.webp b/docs-cn/14-reference/07-tdinsight/assets/import_dashboard.webp new file mode 100644 index 0000000000000000000000000000000000000000..fb7958f1b9fbd43c8f63136024842790e711c490 Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/import_dashboard.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/tdengine_dashboard.png b/docs-cn/14-reference/07-tdinsight/assets/tdengine_dashboard.png deleted file mode 100644 index 0101e7430cb2ef673818de8bd3af53d0d082ad3f..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/07-tdinsight/assets/tdengine_dashboard.png and /dev/null differ diff --git a/docs-cn/14-reference/07-tdinsight/assets/tdengine_dashboard.webp b/docs-cn/14-reference/07-tdinsight/assets/tdengine_dashboard.webp new file mode 100644 index 0000000000000000000000000000000000000000..49f1d88f4ad93286cd8582536e82b4dcc4ff271b Binary files /dev/null and b/docs-cn/14-reference/07-tdinsight/assets/tdengine_dashboard.webp differ diff --git a/docs-cn/14-reference/07-tdinsight/index.md b/docs-cn/14-reference/07-tdinsight/index.md index c2a35012a5c42000a8ee79cb6560fe0eeadc9851..5990a831b8bc1788deaddfb38f717f2723969362 100644 --- a/docs-cn/14-reference/07-tdinsight/index.md +++ b/docs-cn/14-reference/07-tdinsight/index.md @@ -3,9 +3,9 @@ title: TDinsight - 基于Grafana的TDengine零依赖监控解决方案 sidebar_label: TDinsight --- -TDinsight 是使用 [TDengine] 
原生监控数据库和 [Grafana] 对 TDengine 进行监控的解决方案。 +TDinsight 是使用内置监控数据库和 [Grafana] 对 TDengine 进行监控的解决方案。 -TDengine 启动后,会自动创建一个监测数据库 log,并自动将服务器的 CPU、内存、硬盘空间、带宽、请求数、磁盘读写速度、慢查询等信息定时写入该数据库,并对重要的系统操作(比如登录、创建、删除数据库等)以及各种错误报警信息进行记录。通过 [Grafana] 和 [TDengine 数据源插件](https://github.com/taosdata/grafanaplugin/releases),TDinsight 将集群状态、节点信息、插入及查询请求、资源使用情况等进行可视化展示,同时还支持 vnode、dnode、mnode 节点状态异常告警,为开发者实时监控 TDengine 集群运行状态提供了便利。本文将指导用户安装 Grafana 服务器并通过 `TDinsight.sh` 安装脚本自动安装 TDengine 数据源插件及部署 TDinsight 可视化面板。 +TDengine 启动后,会自动创建一个监测数据库 `log`,并自动将服务器的 CPU、内存、硬盘空间、带宽、请求数、磁盘读写速度、慢查询等信息定时写入该数据库,并对重要的系统操作(比如登录、创建、删除数据库等)以及各种错误报警信息进行记录。通过 [Grafana] 和 [TDengine 数据源插件](https://github.com/taosdata/grafanaplugin/releases),TDinsight 将集群状态、节点信息、插入及查询请求、资源使用情况等进行可视化展示,同时还支持 vnode、dnode、mnode 节点状态异常告警,为开发者实时监控 TDengine 集群运行状态提供了便利。本文将指导用户安装 Grafana 服务器并通过 `TDinsight.sh` 安装脚本自动安装 TDengine 数据源插件及部署 TDinsight 可视化面板。 ## 系统要求 @@ -68,6 +68,7 @@ sudo yum install \ ```bash wget https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh chmod +x TDinsight.sh +./TDinsight.sh ``` 这个脚本会自动下载最新的[Grafana TDengine 数据源插件](https://github.com/taosdata/grafanaplugin/releases/latest) 和 [TDinsight 仪表盘](https://grafana.com/grafana/dashboards/15167) ,将命令行选项中的可配置参数转为 [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) 配置文件,以进行自动化部署及更新等操作。利用该脚本提供的告警设置选项,你还可以获得内置的阿里云短信告警通知支持。 @@ -76,13 +77,13 @@ chmod +x TDinsight.sh 下面是 TDinsight.sh 的用法说明: -```bash +```text Usage: ./TDinsight.sh ./TDinsight.sh -h|--help ./TDinsight.sh -n -a -u -p -Install and configure TDinsight dashboard in Grafana on ubuntu 18.04/20.04 system. +Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 system. -h, -help, --help Display help @@ -99,7 +100,7 @@ Install and configure TDinsight dashboard in Grafana on ubuntu 18.04/20.04 syste -u, --tdengine-user TDengine user name. [default: root] -p, --tdengine-password TDengine password. [default: taosdata] --i, --tdinsight-uid Replace with a non-space ascii code as the dashboard id. [default: tdinsight] +-i, --tdinsight-uid Replace with a non-space ASCII code as the dashboard id. [default: tdinsight] -t, --tdinsight-title Dashboard title. [default: TDinsight] -e, --tdinsight-editable If the provisioning dashboard could be editable. [default: false] @@ -110,11 +111,11 @@ Aliyun SMS as Notifier: -N, --sms-notifier-name Provisioning notifier name.[default: TDinsight Builtin SMS] -U, --sms-notifier-uid Provisioning notifier uid, use lowercase notifier name by default. -D, --sms-notifier-is-default Set notifier as default. 
--I, --sms-access-key-id Aliyun sms access key id --K, --sms-access-key-secret Aliyun sms access key secret +-I, --sms-access-key-id Aliyun SMS access key id +-K, --sms-access-key-secret Aliyun SMS access key secret -S, --sms-sign-name Sign name -C, --sms-template-code Template code --T, --sms-template-param Template param, a escaped json string like '{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}' +-T, --sms-template-param Template param, a escaped JSON string like '{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}' -B, --sms-phone-numbers Comma-separated numbers list, eg "189xxxxxxxx,132xxxxxxxx" -L, --sms-listen-addr [default: 127.0.0.1:9100] ``` @@ -145,7 +146,7 @@ Aliyun SMS as Notifier: | -C | --sms-template-code | SMS_TEMPLATE_CODE | 模板代码 | | -T | --sms-template-param | SMS_TEMPLATE_PARAM | 模板参数的 JSON 模板 | | -B | --sms-phone-numbers | SMS_PHONE_NUMBERS | 逗号分隔的手机号列表,例如`"189xxxxxxxx,132xxxxxxxx"` | -| -L | --sms-listen-addr | SMS_LISTEN_ADDR | 内置 sms webhook 监听地址,默认为`127.0.0.1:9100` | +| -L | --sms-listen-addr | SMS_LISTEN_ADDR | 内置 SMS webhook 监听地址,默认为`127.0.0.1:9100` | 假设您在主机 `tdengine` 上启动 TDengine 数据库,HTTP API 端口为 `6041`,用户为 `root1`,密码为 `pass5ord`。执行脚本: @@ -207,14 +208,14 @@ sudo grafana-cli \ plugins install tdengine-datasource ``` -### 配置 Grafana - -将以下设置添加到配置文件 `/etc/grafana/grafana.ini`,以启用未签名插件。 +:::note +3.1.6 和更早版本插件需要在配置文件 `/etc/grafana/grafana.ini` 中添加如下设置,以启用未签名插件。 ```ini [plugins] allow_loading_unsigned_plugins = tdengine-datasource ``` +::: ### 启动 Grafana 服务 @@ -232,43 +233,43 @@ sudo systemctl enable grafana-server 指向 **Configurations** -> **Data Sources** 菜单,然后点击 **Add data source** 按钮。 -![添加数据源按钮](./assets/howto-add-datasource-button.png) +![TDengine Database TDinsight 添加数据源按钮](./assets/howto-add-datasource-button.webp) 搜索并选择**TDengine**。 -![添加数据源](./assets/howto-add-datasource-tdengine.png) +![TDengine Database TDinsight 添加数据源](./assets/howto-add-datasource-tdengine.webp) 配置 TDengine 数据源。 -![数据源配置](./assets/howto-add-datasource.png) +![TDengine Database TDinsight 数据源配置](./assets/howto-add-datasource.webp) 保存并测试,正常情况下会报告 'TDengine Data source is working'。 -![数据源测试](./assets/howto-add-datasource-test.png) +![TDengine Database TDinsight 数据源测试](./assets/howto-add-datasource-test.webp) ### 导入仪表盘 指向 **+** / **Create** - **import**(或 `/dashboard/import` url)。 -![导入仪表盘和配置](./assets/import_dashboard.png) +![TDengine Database TDinsight 导入仪表盘和配置](./assets/import_dashboard.webp) 在 **Import via grafana.com** 位置键入仪表盘 ID `15167` 并 **Load**。 -![通过 grafana.com 导入](./assets/import-dashboard-15167.png) +![通过 grafana.com 导入](./assets/import-dashboard-15167.webp) 导入完成后,TDinsight 的完整页面视图如下所示。 -![显示](./assets/TDinsight-full.png) +![TDengine Database TDinsight 显示](./assets/TDinsight-full.webp) ## TDinsight 仪表盘详细信息 -TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mdodes, vnodes](https://www.taosdata.com/cn/documentation/architecture#cluster)或数据库的使用情况和状态。 +TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mnodes, vnodes](https://www.taosdata.com/cn/documentation/architecture#cluster)或数据库的使用情况和状态。 指标详情如下: ### 集群状态 -![tdinsight-mnodes-overview](./assets/TDinsight-1-cluster-status.png) +![TDengine Database TDinsight mnodes overview](./assets/TDinsight-1-cluster-status.webp) 这部分包括集群当前信息和状态,告警信息也在此处(从左到右,从上到下)。 @@ -281,14 +282,14 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mdodes - **Connections** - 当前连接个数。 - **DNodes/MNodes/VGroups/VNodes**:每种资源的总数和存活数。 - **DNodes/MNodes/VGroups/VNodes Alive Percent**:每种资源的存活数/总数的比例,启用告警规则,并在资源存活率(1 分钟内平均健康资源比例)不足 100%时触发。 -- **Messuring Points 
Used**:启用告警规则的测点数用量(社区版无数据,默认情况下是健康的)。 +- **Measuring Points Used**:启用告警规则的测点数用量(社区版无数据,默认情况下是健康的)。 - **Grants Expire Time**:启用告警规则的企业版过期时间(社区版无数据,默认情况是健康的)。 - **Error Rate**:启用警报的集群总合错误率(每秒平均错误数)。 - **Variables**:`show variables` 表格展示。 ### DNodes 状态 -![tdinsight-mnodes-overview](./assets/TDinsight-2-dnodes.png) +![TDengine Database TDinsight mnodes overview](./assets/TDinsight-2-dnodes.webp) - **DNodes Status**:`show dnodes` 的简单表格视图。 - **DNodes Lifetime**:从创建 dnode 开始经过的时间。 @@ -297,14 +298,14 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mdodes ### MNode 概述 -![tdinsight-mnodes-overview](./assets/TDinsight-3-mnodes.png) +![TDengine Database TDinsight mnodes overview](./assets/TDinsight-3-mnodes.webp) 1. **MNodes Status**:`show mnodes` 的简单表格视图。 2. **MNodes Number**:类似于`DNodes Number`,MNodes 数量变化。 ### 请求 -![tdinsight-requests](./assets/TDinsight-4-requests.png) +![TDengine Database TDinsight requests](./assets/TDinsight-4-requests.webp) 1. **Requests Rate(Inserts per Second)**:平均每秒插入次数。 2. **Requests (Selects)**:查询请求数及变化率(count of second)。 @@ -312,7 +313,7 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mdodes ### 数据库 -![tdinsight-database](./assets/TDinsight-5-database.png) +![TDengine Database TDinsight database](./assets/TDinsight-5-database.webp) 数据库使用情况,对变量 `$database` 的每个值即每个数据库进行重复多行展示。 @@ -324,7 +325,7 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mdodes ### DNode 资源使用情况 -![dnode-usage](./assets/TDinsight-6-dnode-usage.png) +![TDengine Database TDinsight dnode-usage](./assets/TDinsight-6-dnode-usage.webp) 数据节点资源使用情况展示,对变量 `$fqdn` 即每个数据节点进行重复多行展示。包括: @@ -345,22 +346,22 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mdodes ### 登录历史 -![登录历史](./assets/TDinsight-7-login-history.png) +![TDengine Database TDinsight 登录历史](./assets/TDinsight-7-login-history.webp) 目前只报告每分钟登录次数。 -### TaosAdapter +### 监控 taosAdapter -![taosadapter](./assets/TDinsight-8-taosadapter.png) +![TDengine Database TDinsight monitor taosadapter](./assets/TDinsight-8-taosadapter.webp) -包含 taosAdapter 请求统计和状态详情。包括: +支持监控 taosAdapter 请求统计和状态详情。包括: 1. **http_request**: 包含总请求数,请求失败数以及正在处理的请求数 2. **top 3 request endpoint**: 按终端分组,请求排名前三的数据 3. **Memory Used**: taosAdapter 内存使用情况 4. **latency_quantile(ms)**: (1, 2, 5, 9, 99)阶段的分位数 5. **top 3 failed request endpoint**: 按终端分组,请求失败排名前三的数据 -6. **CPU Used**: taosAdapter cpu 使用情况 +6. 
**CPU Used**: taosAdapter CPU 使用情况 ## 升级 diff --git a/docs-cn/14-reference/12-config/index.md b/docs-cn/14-reference/12-config/index.md index cbb3833b5bb170720c2aa7bea6687a50feeae7d5..89c414a5b8479d8253b2a1fa1e3ab3b684f75e78 100644 --- a/docs-cn/14-reference/12-config/index.md +++ b/docs-cn/14-reference/12-config/index.md @@ -80,7 +80,7 @@ taos --dump-config | 补充说明 | RESTful 服务在 2.4.0.0 之前(不含)由 taosd 提供,默认端口为 6041; 在 2.4.0.0 及后续版本由 taosAdapter,默认端口为 6041 | :::note -对于端口,TDengine 会使用从 serverPort 起 13 个连续的 TCP 和 UDP 端口号,请务必在防火墙打开。因此如果是缺省配置,需要打开从 6030 到 6042 共 13 个端口,而且必须 TCP 和 UDP 都打开。(详细的端口情况请参见下表) +确保集群中所有主机在端口 6030-6042 上的 TCP/UDP 协议能够互通。(详细的端口情况请参见下表) ::: | 协议 | 默认端口 | 用途说明 | 修改方法 | | :--- | :-------- | :---------------------------------- | :--------------------------------------------------------------------------------------------------------------------------------- | @@ -590,7 +590,7 @@ charset 的有效值是 UTF-8。 | 适用范围 | 仅服务端适用 | | 含义 | 每个 DB 中 能够使用的最大 vnode 个数 | | 取值范围 | 0-8192 | -| 缺省值 | | +| 缺省值 | 0 | ### maxTablesPerVnode diff --git a/docs-cn/14-reference/13-schemaless/13-schemaless.md b/docs-cn/14-reference/13-schemaless/13-schemaless.md index 4de310c248d7763690acef80cdca1c50f609d63b..f2712f2814593bddd65401cb129c8c58ee55a316 100644 --- a/docs-cn/14-reference/13-schemaless/13-schemaless.md +++ b/docs-cn/14-reference/13-schemaless/13-schemaless.md @@ -82,7 +82,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000 :::tip 无模式所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过 -16k 字节。这方面的具体限制约束请参见 [TAOS SQL 边界限制](/taos-sql/limit) +48KB。这方面的具体限制约束请参见 [TAOS SQL 边界限制](/taos-sql/limit) ::: diff --git a/docs-cn/14-reference/taosAdapter-architecture.png b/docs-cn/14-reference/taosAdapter-architecture.png deleted file mode 100644 index 08a9018553aae6f86b42d127b372d0cecfa9bdf8..0000000000000000000000000000000000000000 Binary files a/docs-cn/14-reference/taosAdapter-architecture.png and /dev/null differ diff --git a/docs-cn/14-reference/taosAdapter-architecture.webp b/docs-cn/14-reference/taosAdapter-architecture.webp new file mode 100644 index 0000000000000000000000000000000000000000..a4162b0a037c06d34191784716c51080b9f8a570 Binary files /dev/null and b/docs-cn/14-reference/taosAdapter-architecture.webp differ diff --git a/docs-cn/20-third-party/01-grafana.mdx b/docs-cn/20-third-party/01-grafana.mdx index 39420a01a308d41924d189fce75e8a372e294eba..328bd6bb4595a6d205cff45539d69e868d33d488 100644 --- a/docs-cn/20-third-party/01-grafana.mdx +++ b/docs-cn/20-third-party/01-grafana.mdx @@ -8,6 +8,7 @@ TDengine 能够与开源数据可视化系统 [Grafana](https://www.grafana.com/ ## 前置条件 要让 Grafana 能正常添加 TDengine 数据源,需要以下几方面的准备工作。 + - TDengine 集群已经部署并正常运行 - taosAdapter 已经安装并正常运行。具体细节请参考 [taosAdapter 的使用手册](/reference/taosadapter) @@ -23,14 +24,14 @@ TDengine 的 Grafana 插件托管在 GitHub,可从 Data Sources` 可以添加数据源,如下图所示: -![img](/img/connections/add_datasource1.jpg) +![TDengine Database Grafana plugin add data source](./add_datasource1.webp) 点击 `Add data source` 可进入新增数据源页面,在查询框中输入 TDengine 可选择添加,如下图所示: -![img](/img/connections/add_datasource2.jpg) +![TDengine Database Grafana plugin add data source](./add_datasource2.webp) 进入数据源配置页面,按照默认提示修改相应配置即可: -![img](/img/connections/add_datasource3.jpg) +![TDengine Database Grafana plugin add data source](./add_datasource3.webp) - Host: TDengine 集群中提供 REST 服务 (在 2.4 之前由 taosd 提供, 从 2.4 开始由 taosAdapter 提供)的组件所在服务器的 IP 地址与 TDengine REST 服务的端口号(6041),默认 http://localhost:6041。 - User:TDengine 用户名。 @@ -76,13 +80,13 @@ 
GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=tdengine-datasource 点击 `Save & Test` 进行测试,成功会有如下提示: -![img](/img/connections/add_datasource4.jpg) +![TDengine Database Grafana plugin add data source](./add_datasource4.webp) ### 创建 Dashboard 回到主界面创建 Dashboard,点击 Add Query 进入面板查询页面: -![img](/img/connections/create_dashboard1.jpg) +![TDengine Database Grafana plugin create dashboard](./create_dashboard1.webp) 如上图所示,在 Query 中选中 `TDengine` 数据源,在下方查询框可输入相应 SQL 进行查询,具体说明如下: @@ -92,7 +96,7 @@ GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=tdengine-datasource 按照默认提示查询当前 TDengine 部署所在服务器指定间隔系统内存平均使用量如下: -![img](/img/connections/create_dashboard2.jpg) +![TDengine Database Grafana plugin create dashboard](./create_dashboard2.webp) > 关于如何使用 Grafana 创建相应的监测界面以及更多有关使用 Grafana 的信息,请参考 Grafana 官方的[文档](https://grafana.com/docs/)。 diff --git a/docs-cn/20-third-party/09-emq-broker.md b/docs-cn/20-third-party/09-emq-broker.md index f57ccb20e6517c51b55093d11fa767bef7d0c9a8..833fa97e2e5f9f138718e18bb16aa3e65abca8cc 100644 --- a/docs-cn/20-third-party/09-emq-broker.md +++ b/docs-cn/20-third-party/09-emq-broker.md @@ -45,25 +45,25 @@ MQTT 是流行的物联网数据传输协议,[EMQX](https://github.com/emqx/em 使用浏览器打开网址 http://IP:18083 并登录 EMQX Dashboard。初次安装用户名为 `admin` 密码为:`public` -![img](./emqx/login-dashboard.png) +![TDengine Database EMQX login dashboard](./emqx/login-dashboard.webp) ### 创建规则(Rule) 选择左侧“规则引擎(Rule Engine)”中的“规则(Rule)”并点击“创建(Create)”按钮: -![img](./emqx/rule-engine.png) +![TDengine Database EMQX rule engine](./emqx/rule-engine.webp) ### 编辑 SQL 字段 -![img](./emqx/create-rule.png) +![TDengine Database EMQX create rule](./emqx/create-rule.webp) ### 新增“动作(action handler)” -![img](./emqx/add-action-handler.png) +![TDengine Database EMQX](./emqx/add-action-handler.webp) ### 新增“资源(Resource)” -![img](./emqx/create-resource.png) +![TDengine Database EMQX create resource](./emqx/create-resource.webp) 选择“发送数据到 Web 服务“并点击“新建资源”按钮: @@ -71,13 +71,13 @@ MQTT 是流行的物联网数据传输协议,[EMQX](https://github.com/emqx/em 选择“发送数据到 Web 服务“并填写 请求 URL 为 运行 taosAdapter 的服务器地址和端口(默认为 6041)。其他属性请保持默认值。 -![img](./emqx/edit-resource.png) +![TDengine Database EMQX edit resource](./emqx/edit-resource.webp) ### 编辑“动作(action)” 编辑资源配置,增加 Authorization 认证的键/值配对项,相关文档请参考[ TDengine REST API 文档](https://docs.taosdata.com/reference/rest-api/)。在消息体中输入规则引擎替换模板。 -![img](./emqx/edit-action.png) +![TDengine Database EMQX edit action](./emqx/edit-action.webp) ## 编写模拟测试程序 @@ -164,7 +164,7 @@ MQTT 是流行的物联网数据传输协议,[EMQX](https://github.com/emqx/em 注意:代码中 CLIENT_NUM 在开始测试中可以先设置一个较小的值,避免硬件性能不能完全处理较大并发客户端数量。 -![img](./emqx/client-num.png) +![TDengine Database EMQX client num](./emqx/client-num.webp) ## 执行测试模拟发送 MQTT 数据 @@ -173,19 +173,19 @@ npm install mqtt mockjs --save --registry=https://registry.npm.taobao.org node mock.js ``` -![img](./emqx/run-mock.png) +![TDengine Database EMQX run-mock](./emqx/run-mock.webp) ## 验证 EMQX 接收到数据 在 EMQX Dashboard 规则引擎界面进行刷新,可以看到有多少条记录被正确接收到: -![img](./emqx/check-rule-matched.png) +![TDengine Database EMQX rule matched](./emqx/check-rule-matched.webp) ## 验证数据写入到 TDengine 使用 TDengine CLI 程序登录并查询相应数据库和表,验证数据是否被正确写入到 TDengine 中: -![img](./emqx/check-result-in-taos.png) +![TDengine Database EMQX result in taos](./emqx/check-result-in-taos.webp) TDengine 详细使用方法请参考 [TDengine 官方文档](https://docs.taosdata.com/)。 EMQX 详细使用方法请参考 [EMQX 官方文档](https://www.emqx.io/docs/zh/v4.4/rule/rule-engine.html)。 diff --git a/docs-cn/20-third-party/11-kafka.md b/docs-cn/20-third-party/11-kafka.md index f76c6384d12072a4548e493190d1edd8ac615b40..8369806adcfe1b195348e7d60160609cde9150e8 
100644 --- a/docs-cn/20-third-party/11-kafka.md +++ b/docs-cn/20-third-party/11-kafka.md @@ -7,17 +7,17 @@ TDengine Kafka Connector 包含两个插件: TDengine Source Connector 和 TDeng ## 什么是 Kafka Connect? -Kafka Connect 是 Apache Kafka 的一个组件,用于使其它系统,比如数据库、云服务、文件系统等能方便地连接到 Kafka。数据既可以通过 Kafka Connect 从其它系统流向 Kafka, 也可以通过 Kafka Connect 从 Kafka 流向其它系统。从其它系统读数据的插件称为 Source Connector, 写数据到其它系统的插件称为 Sink Connector。Source Connector 和 Sink Connector 都不会直接连接 Kafka Broker,Source Connector 把数据转交给 Kafka Connect。Sink Connector 从 Kafka Connect 接收数据。 +Kafka Connect 是 [Apache Kafka](https://kafka.apache.org/) 的一个组件,用于使其它系统,比如数据库、云服务、文件系统等能方便地连接到 Kafka。数据既可以通过 Kafka Connect 从其它系统流向 Kafka, 也可以通过 Kafka Connect 从 Kafka 流向其它系统。从其它系统读数据的插件称为 Source Connector, 写数据到其它系统的插件称为 Sink Connector。Source Connector 和 Sink Connector 都不会直接连接 Kafka Broker,Source Connector 把数据转交给 Kafka Connect。Sink Connector 从 Kafka Connect 接收数据。 -![](Kafka_Connect.png) +![TDengine Database Kafka Connector -- Kafka Connect structure](kafka/Kafka_Connect.webp) TDengine Source Connector 用于把数据实时地从 TDengine 读出来发送给 Kafka Connect。TDengine Sink Connector 用于 从 Kafka Connect 接收数据并写入 TDengine。 -![](streaming-integration-with-kafka-connect.png) +![TDengine Database Kafka Connector -- streaming integration with kafka connect](kafka/streaming-integration-with-kafka-connect.webp) ## 什么是 Confluent? -Confluent 在 Kafka 的基础上增加很多扩展功能。包括: +[Confluent](https://www.confluent.io/) 在 Kafka 的基础上增加很多扩展功能。包括: 1. Schema Registry 2. REST 代理 @@ -26,7 +26,7 @@ Confluent 在 Kafka 的基础上增加很多扩展功能。包括: 5. 管理和监控 Kafka 的 GUI —— Confluent 控制中心 这些扩展功能有的包含在社区版本的 Confluent 中,有的只有企业版能用。 -![](confluentPlatform.png) +![TDengine Database Kafka Connector -- Confluent introduction](kafka/confluentPlatform.webp) Confluent 企业版提供了 `confluent` 命令行工具管理各个组件。 @@ -81,10 +81,10 @@ Development: false git clone https://github.com/taosdata/kafka-connect-tdengine.git cd kafka-connect-tdengine mvn clean package -unzip -d $CONFLUENT_HOME/share/confluent-hub-components/ target/components/packages/taosdata-kafka-connect-tdengine-0.1.0.zip +unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip ``` -以上脚本先 clone 项目源码,然后用 Maven 编译打包。打包完成后在 `target/components/packages/` 目录生成了插件的 zip 包。把这个 zip 包解压到安装插件的路径即可。安装插件的路径在配置文件 `$CONFLUENT_HOME/etc/kafka/connect-standalone.properties` 中。默认的路径为 `$CONFLUENT_HOME/share/confluent-hub-components/`。 +以上脚本先 clone 项目源码,然后用 Maven 编译打包。打包完成后在 `target/components/packages/` 目录生成了插件的 zip 包。把这个 zip 包解压到安装插件的路径即可。上面的示例中使用了内置的插件安装路径: `$CONFLUENT_HOME/share/java/`。 ### 用 confluent-hub 安装 @@ -98,7 +98,7 @@ confluent local services start ``` :::note -一定要先安装插件再启动 Confluent, 否则会出现找不到类的错误。Kafka Connect 的日志(默认路径: /tmp/confluent.xxxx/connect/logs/connect.log)中会输出成功安装的插件,据此可判断插件是否安装成功。 +一定要先安装插件再启动 Confluent, 否则加载插件会失败。 ::: :::tip @@ -125,6 +125,61 @@ Control Center is [UP] 清空数据可执行 `rm -rf /tmp/confluent.106668`。 ::: +### 验证各个组件是否启动成功 + +输入命令: + +``` +confluent local services status +``` + +如果各组件都启动成功,会得到如下输出: + +``` +Connect is [UP] +Control Center is [UP] +Kafka is [UP] +Kafka REST is [UP] +ksqlDB Server is [UP] +Schema Registry is [UP] +ZooKeeper is [UP] +``` + +### 验证插件是否安装成功 + +在 Kafka Connect 组件完全启动后,可用以下命令列出成功加载的插件: + +``` +confluent local services connect plugin list +``` + +如果成功安装,会输出如下: + +```txt {4,9} +Available Connect Plugins: +[ + { + "class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector", + "type": "sink", + "version": "1.0.0" + }, + { + "class": "com.taosdata.kafka.connect.source.TDengineSourceConnector", + "type": "source", 
"version": "1.0.0" + }, +...... +``` + +如果插件安装失败,请检查 Kafka Connect 的启动日志是否有异常信息,用以下命令输出日志路径: +``` +echo `cat /tmp/confluent.current`/connect/connect.stdout +``` +该命令的输出类似: `/tmp/confluent.104086/connect/connect.stdout`。 + +与日志文件 `connect.stdout` 同一目录,还有一个文件名为: `connect.properties`。在这个文件的末尾,可以看到最终生效的 `plugin.path`, 它是一系列用逗号分割的路径。如果插件安装失败,很可能是因为实际的安装路径不包含在 `plugin.path` 中。 + + ## TDengine Sink Connector 的使用 TDengine Sink Connector 的作用是同步指定 topic 的数据到 TDengine。用户无需提前创建数据库和超级表。可手动指定目标数据库的名字(见配置参数 connection.database), 也可按一定规则生成(见配置参数 connection.database.prefix)。 @@ -144,7 +199,7 @@ vi sink-demo.properties sink-demo.properties 内容如下: ```ini title="sink-demo.properties" -name=tdengine-sink-demo +name=TDengineSinkConnector connector.class=com.taosdata.kafka.connect.sink.TDengineSinkConnector tasks.max=1 topics=meters @@ -153,6 +208,7 @@ connection.user=root connection.password=taosdata connection.database=power db.schemaless=line +data.precision=ns key.converter=org.apache.kafka.connect.storage.StringConverter value.converter=org.apache.kafka.connect.storage.StringConverter ``` @@ -179,6 +235,7 @@ confluent local services connect connector load TDengineSinkConnector --config . "connection.url": "jdbc:TAOS://127.0.0.1:6030", "connection.user": "root", "connector.class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector", + "data.precision": "ns", "db.schemaless": "line", "key.converter": "org.apache.kafka.connect.storage.StringConverter", "tasks.max": "1", @@ -196,10 +253,10 @@ confluent local services connect connector load TDengineSinkConnector --config . 准备测试数据的文本文件,内容如下: ```txt title="test-data.txt" -meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000000 -meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250000000 -meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249000000 -meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250000000 +meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000000 +meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250000000 +meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249000000 +meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250000000 ``` 使用 kafka-console-producer 向主题 meters 添加测试数据。 @@ -223,10 +280,10 @@ Database changed. 
taos> select * from meters; ts | current | voltage | phase | groupid | location | =============================================================================================================================================================== - 2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | Beijing.Haidian | - 2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | Beijing.Haidian | - 2022-03-28 09:56:51.249000000 | 10.800000000 | 223.000000000 | 0.290000000 | 3 | Beijing.Haidian | - 2022-03-28 09:56:51.250000000 | 11.300000000 | 221.000000000 | 0.350000000 | 3 | Beijing.Haidian | + 2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles | + 2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles | + 2022-03-28 09:56:51.249000000 | 10.800000000 | 223.000000000 | 0.290000000 | 3 | California.LosAngeles | + 2022-03-28 09:56:51.250000000 | 11.300000000 | 221.000000000 | 0.350000000 | 3 | California.LosAngeles | Query OK, 4 row(s) in set (0.004208s) ``` @@ -275,13 +332,13 @@ DROP DATABASE IF EXISTS test; CREATE DATABASE test; USE test; CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT); -INSERT INTO d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) d1002 USING meters TAGS(Beijing.Chaoyang, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) d1003 USING meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) d1003 USING meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) d1004 USING meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) d1004 USING meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000); +INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) d1002 USING meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) d1003 USING meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) d1003 USING meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) d1004 USING meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) d1004 USING meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000); ``` 使用 TDengine CLI, 执行 SQL 文件。 ``` -taos -f prepare-sorce-data.sql +taos -f prepare-source-data.sql ``` ### 创建 Connector 实例 @@ -302,8 +359,8 @@ kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topi ``` ...... 
-meters,location="beijing.chaoyang",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000 -meters,location="beijing.chaoyang",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000 +meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000 +meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000 ...... ``` @@ -356,21 +413,33 @@ confluent local services connect connector unload TDengineSourceConnector 2. `connection.database.prefix`: 当 connection.database 为 null 时, 目标数据库的前缀。可以包含占位符 '${topic}'。 比如 kafka_${topic}, 对于主题 'orders' 将写入数据库 'kafka_orders'。 默认 null。当为 null 时,目标数据库的名字和主题的名字是一致的。 3. `batch.size`: 分批写入每批记录数。当 Sink Connector 一次接收到的数据大于这个值时将分批写入。 4. `max.retries`: 发生错误时的最大重试次数。默认为 1。 -5. `retry.backoff.ms`: 发送错误时重试的时间间隔。单位毫秒,默认 3000。 -6. `db.schemaless`: 数据格式,必须指定为: line、json、telnet 中的一个。分别代表 InfluxDB 行协议格式、 OpenTSDB JSON 格式、 OpenTSDB Telnet 行协议格式。 +5. `retry.backoff.ms`: 发生错误时重试的时间间隔。单位毫秒,默认为 3000。 +6. `db.schemaless`: 数据格式,可选值为: + 1. line :代表 InfluxDB 行协议格式 + 2. json : 代表 OpenTSDB JSON 格式 + 3. telnet :代表 OpenTSDB Telnet 行协议格式 +7. `data.precision`: 使用 InfluxDB 行协议格式时,时间戳的精度。可选值为: + 1. ms : 表示毫秒 + 2. us : 表示微秒 + 3. ns : 表示纳秒。默认为纳秒。 ### TDengine Source Connector 特有的配置 1. `connection.database`: 源数据库名称,无缺省值。 2. `topic.prefix`: 数据导入 kafka 后 topic 名称前缀。 使用 `topic.prefix` + `connection.database` 名称作为完整 topic 名。默认为空字符串 ""。 -3. `timestamp.initial`: 数据同步起始时间。格式为'yyyy-MM-dd HH:mm:ss'。默认 "1970-01-01 00:00:00"。 -4. `poll.interval.ms`: 拉取数据间隔,单位为 ms。默认 1000。 +3. `timestamp.initial`: 数据同步起始时间。格式为'yyyy-MM-dd HH:mm:ss'。默认为 "1970-01-01 00:00:00"。 +4. `poll.interval.ms`: 拉取数据间隔,单位为 ms。默认为 1000。 5. `fetch.max.rows` : 检索数据库时最大检索条数。 默认为 100。 -6. `out.format`: 数据格式。取值 line 或 json。line 表示 InfluxDB Line 协议格式, json 表示 OpenTSDB JSON 格式。默认 line。 +6. `out.format`: 数据格式。取值 line 或 json。line 表示 InfluxDB Line 协议格式, json 表示 OpenTSDB JSON 格式。默认为 line。 + +## 其他说明 + +1. 插件的安装位置可以自定义,请参考官方文档:https://docs.confluent.io/home/connect/self-managed/install.html#install-connector-manually。 +2. 
本教程的示例程序使用了 Confluent 平台,但是 TDengine Kafka Connector 本身同样适用于独立安装的 Kafka, 且配置方法相同。关于如何在独立安装的 Kafka 环境使用 Kafka Connect 插件, 请参考官方文档: https://kafka.apache.org/documentation/#connect。 ## 问题反馈 -https://github.com/taosdata/kafka-connect-tdengine/issues +无论遇到任何问题,都欢迎在本项目的 Github 仓库反馈: https://github.com/taosdata/kafka-connect-tdengine/issues。 ## 参考 diff --git a/docs-cn/20-third-party/Kafka_Connect.png b/docs-cn/20-third-party/Kafka_Connect.png deleted file mode 100644 index f3dc02ea2a743c6e1ae5531e14f820e3adeca29a..0000000000000000000000000000000000000000 Binary files a/docs-cn/20-third-party/Kafka_Connect.png and /dev/null differ diff --git a/docs-cn/20-third-party/add_datasource1.webp b/docs-cn/20-third-party/add_datasource1.webp new file mode 100644 index 0000000000000000000000000000000000000000..211edc4457abd0db6b0ef64636d61d65b5f43db6 Binary files /dev/null and b/docs-cn/20-third-party/add_datasource1.webp differ diff --git a/docs-cn/20-third-party/add_datasource2.webp b/docs-cn/20-third-party/add_datasource2.webp new file mode 100644 index 0000000000000000000000000000000000000000..8ab547231fee4d3b0874fcfe08c0ce152b0c53a1 Binary files /dev/null and b/docs-cn/20-third-party/add_datasource2.webp differ diff --git a/docs-cn/20-third-party/add_datasource3.webp b/docs-cn/20-third-party/add_datasource3.webp new file mode 100644 index 0000000000000000000000000000000000000000..d8a733360a09b4425c571f254a9ecb298c04b72f Binary files /dev/null and b/docs-cn/20-third-party/add_datasource3.webp differ diff --git a/docs-cn/20-third-party/add_datasource4.webp b/docs-cn/20-third-party/add_datasource4.webp new file mode 100644 index 0000000000000000000000000000000000000000..b1e0fc6e2b27df4af1bb5ad92756bcb5d4fda63e Binary files /dev/null and b/docs-cn/20-third-party/add_datasource4.webp differ diff --git a/docs-cn/20-third-party/confluentPlatform.png b/docs-cn/20-third-party/confluentPlatform.png deleted file mode 100644 index f8e69f2c7f64d809996b2d1bf1370b67b8030850..0000000000000000000000000000000000000000 Binary files a/docs-cn/20-third-party/confluentPlatform.png and /dev/null differ diff --git a/docs-cn/20-third-party/create_dashboard1.webp b/docs-cn/20-third-party/create_dashboard1.webp new file mode 100644 index 0000000000000000000000000000000000000000..55eb388833e4df2a46f4d1cf6d346aa11429385d Binary files /dev/null and b/docs-cn/20-third-party/create_dashboard1.webp differ diff --git a/docs-cn/20-third-party/create_dashboard2.webp b/docs-cn/20-third-party/create_dashboard2.webp new file mode 100644 index 0000000000000000000000000000000000000000..bb40e407187718c52e9f617d8ebd3d25fd14b56b Binary files /dev/null and b/docs-cn/20-third-party/create_dashboard2.webp differ diff --git a/docs-cn/20-third-party/dashboard-15146.webp b/docs-cn/20-third-party/dashboard-15146.webp new file mode 100644 index 0000000000000000000000000000000000000000..fae586f5c74317621002416b2824830a7bdf3982 Binary files /dev/null and b/docs-cn/20-third-party/dashboard-15146.webp differ diff --git a/docs-cn/20-third-party/emqx/add-action-handler.png b/docs-cn/20-third-party/emqx/add-action-handler.png deleted file mode 100644 index 97a1f933ecfadfcab399938806d73c5a5ecc6427..0000000000000000000000000000000000000000 Binary files a/docs-cn/20-third-party/emqx/add-action-handler.png and /dev/null differ diff --git a/docs-cn/20-third-party/emqx/add-action-handler.webp b/docs-cn/20-third-party/emqx/add-action-handler.webp new file mode 100644 index 0000000000000000000000000000000000000000..4a8d105f711991226cfbd43b6e9ab07d7ccc686a Binary files 
/dev/null and b/docs-cn/20-third-party/emqx/add-action-handler.webp differ diff --git a/docs-cn/20-third-party/emqx/check-result-in-taos.png b/docs-cn/20-third-party/emqx/check-result-in-taos.png deleted file mode 100644 index c17a5c1ea2b9bbd49263056c8bf09c9aabab07d5..0000000000000000000000000000000000000000 Binary files a/docs-cn/20-third-party/emqx/check-result-in-taos.png and /dev/null differ diff --git a/docs-cn/20-third-party/emqx/check-result-in-taos.webp b/docs-cn/20-third-party/emqx/check-result-in-taos.webp new file mode 100644 index 0000000000000000000000000000000000000000..8fa040a86104fece02ddaf8986f0a67de316143d Binary files /dev/null and b/docs-cn/20-third-party/emqx/check-result-in-taos.webp differ diff --git a/docs-cn/20-third-party/emqx/check-rule-matched.png b/docs-cn/20-third-party/emqx/check-rule-matched.png deleted file mode 100644 index 9e9a466946a1afa857e2bbc07b14956dd0f984b6..0000000000000000000000000000000000000000 Binary files a/docs-cn/20-third-party/emqx/check-rule-matched.png and /dev/null differ diff --git a/docs-cn/20-third-party/emqx/check-rule-matched.webp b/docs-cn/20-third-party/emqx/check-rule-matched.webp new file mode 100644 index 0000000000000000000000000000000000000000..e5a614035739df859b27c817f3b9f41be444b513 Binary files /dev/null and b/docs-cn/20-third-party/emqx/check-rule-matched.webp differ diff --git a/docs-cn/20-third-party/emqx/client-num.png b/docs-cn/20-third-party/emqx/client-num.png deleted file mode 100644 index fff48cbf3b271c367079ddde425b3f9b014062f7..0000000000000000000000000000000000000000 Binary files a/docs-cn/20-third-party/emqx/client-num.png and /dev/null differ diff --git a/docs-cn/20-third-party/emqx/client-num.webp b/docs-cn/20-third-party/emqx/client-num.webp new file mode 100644 index 0000000000000000000000000000000000000000..a151b184843607d67b649babb3145bfb3e329cda Binary files /dev/null and b/docs-cn/20-third-party/emqx/client-num.webp differ diff --git a/docs-cn/20-third-party/emqx/create-resource.png b/docs-cn/20-third-party/emqx/create-resource.png deleted file mode 100644 index 58da4c391a3692b9f5fa348d952701eab8bcb746..0000000000000000000000000000000000000000 Binary files a/docs-cn/20-third-party/emqx/create-resource.png and /dev/null differ diff --git a/docs-cn/20-third-party/emqx/create-resource.webp b/docs-cn/20-third-party/emqx/create-resource.webp new file mode 100644 index 0000000000000000000000000000000000000000..bf9cccbe49c57f925c5e6b094a4c0d88a64242cb Binary files /dev/null and b/docs-cn/20-third-party/emqx/create-resource.webp differ diff --git a/docs-cn/20-third-party/emqx/create-rule.png b/docs-cn/20-third-party/emqx/create-rule.png deleted file mode 100644 index 73b0b6ee3e6065a142df98abe8c0dbb32b34f89d..0000000000000000000000000000000000000000 Binary files a/docs-cn/20-third-party/emqx/create-rule.png and /dev/null differ diff --git a/docs-cn/20-third-party/emqx/create-rule.webp b/docs-cn/20-third-party/emqx/create-rule.webp new file mode 100644 index 0000000000000000000000000000000000000000..13e8fc83d48d2fd9d0a303c707ef3024d3ee5203 Binary files /dev/null and b/docs-cn/20-third-party/emqx/create-rule.webp differ diff --git a/docs-cn/20-third-party/emqx/edit-action.png b/docs-cn/20-third-party/emqx/edit-action.png deleted file mode 100644 index 2a43ee369a439cf11cee23c11f25d6a84b26d7dc..0000000000000000000000000000000000000000 Binary files a/docs-cn/20-third-party/emqx/edit-action.png and /dev/null differ diff --git a/docs-cn/20-third-party/emqx/edit-action.webp 
b/docs-cn/20-third-party/emqx/edit-action.webp new file mode 100644 index 0000000000000000000000000000000000000000..7f6d2e36a82b1917930e5d3969115db9359674a0 Binary files /dev/null and b/docs-cn/20-third-party/emqx/edit-action.webp differ diff --git a/docs-cn/20-third-party/emqx/edit-resource.png b/docs-cn/20-third-party/emqx/edit-resource.png deleted file mode 100644 index 0a0b3560044f4ed6e0a8f040b74085a7e8948b1f..0000000000000000000000000000000000000000 Binary files a/docs-cn/20-third-party/emqx/edit-resource.png and /dev/null differ diff --git a/docs-cn/20-third-party/emqx/edit-resource.webp b/docs-cn/20-third-party/emqx/edit-resource.webp new file mode 100644 index 0000000000000000000000000000000000000000..fd5d278fab16bba4e04e1c348d4086dce77abb98 Binary files /dev/null and b/docs-cn/20-third-party/emqx/edit-resource.webp differ diff --git a/docs-cn/20-third-party/emqx/login-dashboard.png b/docs-cn/20-third-party/emqx/login-dashboard.png deleted file mode 100644 index d6c5035c98d860faf639ef6611c6719adf80c47b..0000000000000000000000000000000000000000 Binary files a/docs-cn/20-third-party/emqx/login-dashboard.png and /dev/null differ diff --git a/docs-cn/20-third-party/emqx/login-dashboard.webp b/docs-cn/20-third-party/emqx/login-dashboard.webp new file mode 100644 index 0000000000000000000000000000000000000000..f84cee668fb6efe1586515ba0dee3ae2f10a5b30 Binary files /dev/null and b/docs-cn/20-third-party/emqx/login-dashboard.webp differ diff --git a/docs-cn/20-third-party/emqx/rule-engine.png b/docs-cn/20-third-party/emqx/rule-engine.png deleted file mode 100644 index db110a837b024c82ee9d22f02dcd3a9d06abdd55..0000000000000000000000000000000000000000 Binary files a/docs-cn/20-third-party/emqx/rule-engine.png and /dev/null differ diff --git a/docs-cn/20-third-party/emqx/rule-engine.webp b/docs-cn/20-third-party/emqx/rule-engine.webp new file mode 100644 index 0000000000000000000000000000000000000000..c1711c8cc757cd73fef5cb941a1818756241f7f0 Binary files /dev/null and b/docs-cn/20-third-party/emqx/rule-engine.webp differ diff --git a/docs-cn/20-third-party/emqx/rule-header-key-value.png b/docs-cn/20-third-party/emqx/rule-header-key-value.png deleted file mode 100644 index b81b9a9684aa2f98d00b7ec21e5de411fb450312..0000000000000000000000000000000000000000 Binary files a/docs-cn/20-third-party/emqx/rule-header-key-value.png and /dev/null differ diff --git a/docs-cn/20-third-party/emqx/rule-header-key-value.webp b/docs-cn/20-third-party/emqx/rule-header-key-value.webp new file mode 100644 index 0000000000000000000000000000000000000000..e645b3822dffec86f4926e78a57eaffa1e7f4d8d Binary files /dev/null and b/docs-cn/20-third-party/emqx/rule-header-key-value.webp differ diff --git a/docs-cn/20-third-party/emqx/run-mock.png b/docs-cn/20-third-party/emqx/run-mock.png deleted file mode 100644 index 0da25818575247732d5d3a783aa52cf7ce24662c..0000000000000000000000000000000000000000 Binary files a/docs-cn/20-third-party/emqx/run-mock.png and /dev/null differ diff --git a/docs-cn/20-third-party/emqx/run-mock.webp b/docs-cn/20-third-party/emqx/run-mock.webp new file mode 100644 index 0000000000000000000000000000000000000000..ed33f1666d456f1ab40ed6830af4550d4c7ca037 Binary files /dev/null and b/docs-cn/20-third-party/emqx/run-mock.webp differ diff --git a/docs-cn/20-third-party/import_dashboard1.webp b/docs-cn/20-third-party/import_dashboard1.webp new file mode 100644 index 0000000000000000000000000000000000000000..d4fb374ce8bb75c8a0fbdbb9cab5b30eb29ab06d Binary files /dev/null and 
b/docs-cn/20-third-party/import_dashboard1.webp differ diff --git a/docs-cn/20-third-party/import_dashboard2.webp b/docs-cn/20-third-party/import_dashboard2.webp new file mode 100644 index 0000000000000000000000000000000000000000..9f74dc96be20ab64b5fb555aaccdaa1c1139b35c Binary files /dev/null and b/docs-cn/20-third-party/import_dashboard2.webp differ diff --git a/docs-cn/20-third-party/index.md b/docs-cn/20-third-party/index.md index 2d21d1d74276ee12ad857367217273eda184a7b9..b493203225c89944759a216f2b75e0afa6ad03ba 100644 --- a/docs-cn/20-third-party/index.md +++ b/docs-cn/20-third-party/index.md @@ -4,9 +4,11 @@ title: 第三方工具 TDengine 通过对标准 SQL 命令、常用数据库连接器标准(例如 JDBC)、ORM 以及其他流行时序数据库写入协议(例如 InfluxDB Line Protocol、OpenTSDB JSON、OpenTSDB Telnet 等)的支持可以使 TDengine 非常容易和第三方工具共同使用。 +对于支持的第三方工具,无需任何代码,你只需要做简单的配置,就可以将 TDengine 与第三方工具无缝集成起来。 + ```mdx-code-block import DocCardList from '@theme/DocCardList'; import {useCurrentSidebarCategory} from '@docusaurus/theme-common'; -``` \ No newline at end of file +``` diff --git a/docs-cn/20-third-party/kafka/Kafka_Connect.webp b/docs-cn/20-third-party/kafka/Kafka_Connect.webp new file mode 100644 index 0000000000000000000000000000000000000000..8f2000a749b0a2ccec9939abd144c53c44fbe171 Binary files /dev/null and b/docs-cn/20-third-party/kafka/Kafka_Connect.webp differ diff --git a/docs-cn/20-third-party/kafka/confluentPlatform.webp b/docs-cn/20-third-party/kafka/confluentPlatform.webp new file mode 100644 index 0000000000000000000000000000000000000000..ff03d4e51aaaec85f07ff41ecda0fb9bd6cb2847 Binary files /dev/null and b/docs-cn/20-third-party/kafka/confluentPlatform.webp differ diff --git a/docs-cn/20-third-party/kafka/streaming-integration-with-kafka-connect.webp b/docs-cn/20-third-party/kafka/streaming-integration-with-kafka-connect.webp new file mode 100644 index 0000000000000000000000000000000000000000..120d534ec132cea2ccef6cf87a3ce680a5ac6e9c Binary files /dev/null and b/docs-cn/20-third-party/kafka/streaming-integration-with-kafka-connect.webp differ diff --git a/docs-cn/20-third-party/streaming-integration-with-kafka-connect.png b/docs-cn/20-third-party/streaming-integration-with-kafka-connect.png deleted file mode 100644 index 26d8a866d706180c900d69bb6f57ca2dff0047dd..0000000000000000000000000000000000000000 Binary files a/docs-cn/20-third-party/streaming-integration-with-kafka-connect.png and /dev/null differ diff --git a/docs-cn/21-tdinternal/01-arch.md b/docs-cn/21-tdinternal/01-arch.md index 6f479efc1ad13e27899e7819d194a2df59ed3ad1..433cb4808b60ce73c639a23beef45fb8e1afb7dd 100644 --- a/docs-cn/21-tdinternal/01-arch.md +++ b/docs-cn/21-tdinternal/01-arch.md @@ -11,7 +11,7 @@ TDengine 的设计是基于单个硬件、软件系统不可靠,基于任何 TDengine 分布式架构的逻辑结构图如下: -![TDengine架构示意图](/img/architecture/structure.png) +![TDengine Database 架构示意图](./structure.webp)
图 1 TDengine架构示意图
@@ -41,7 +41,7 @@ TDengine 分布式架构的逻辑结构图如下: - 集群数据节点对外提供 RESTful 服务占用一个 TCP 端口,是 serverPort+11。 - 集群内数据节点与 Arbitrator 节点之间通讯占用一个 TCP 端口,是 serverPort+12。 -因此一个数据节点总的端口范围为 serverPort 到 serverPort+12,总共 13 个 TCP/UDP 端口。使用时,需要确保防火墙将这些端口打开。每个数据节点可以配置不同的 serverPort。详细的端口情况请参见 [TDengine 2.0 端口说明](/train-faq/faq#port) +因此一个数据节点总的端口范围为 serverPort 到 serverPort+12,总共 13 个 TCP/UDP 端口。确保集群中所有主机在端口 6030-6042 上的 TCP/UDP 协议能够互通。详细的端口情况请参见 [TDengine 2.0 端口说明](/train-faq/faq#port) **集群对外连接:**TDengine 集群可以容纳单个、多个甚至几千个数据节点。应用只需要向集群中任何一个数据节点发起连接即可,连接需要提供的网络参数是一数据节点的 End Point(FQDN 加配置的端口号)。通过命令行 CLI 启动应用 taos 时,可以通过选项-h 来指定数据节点的 FQDN,-P 来指定其配置的端口号,如果端口不配置,将采用 TDengine 的系统配置参数 serverPort。 @@ -63,7 +63,7 @@ TDengine 分布式架构的逻辑结构图如下: 为解释 vnode、mnode、taosc 和应用之间的关系以及各自扮演的角色,下面对写入数据这个典型操作的流程进行剖析。 -![TDengine典型的操作流程](/img/architecture/message.png) +![TDengine Database 典型的操作流程](./message.webp)
图 2 TDengine 典型的操作流程
@@ -135,7 +135,7 @@ TDengine 除 vnode 分片之外,还对时序数据按照时间段进行分区 Master Vnode 遵循下面的写入流程: -![TDengine Master写入流程](/img/architecture/write_master.png) +![TDengine Database Master写入流程](./write_master.webp)
图 3 TDengine Master 写入流程
@@ -150,7 +150,7 @@ Master Vnode 遵循下面的写入流程: 对于 slave vnode,写入流程是: -![TDengine Slave 写入流程](/img/architecture/write_slave.png) +![TDengine Database Slave 写入流程](./write_slave.webp)
图 4 TDengine Slave 写入流程
@@ -284,7 +284,7 @@ SELECT COUNT(*) FROM d1001 WHERE ts >= '2017-7-14 00:00:00' AND ts < '2017-7-14 TDengine 对每个数据采集点单独建表,但在实际应用中经常需要对不同的采集点数据进行聚合。为高效的进行聚合操作,TDengine 引入超级表(STable)的概念。超级表用来代表一特定类型的数据采集点,它是包含多张表的表集合,集合里每张表的模式(schema)完全一致,但每张表都带有自己的静态标签,标签可以有多个,可以随时增加、删除和修改。应用可通过指定标签的过滤条件,对一个 STable 下的全部或部分表进行聚合或统计操作,这样大大简化应用的开发。其具体流程如下图所示: -![多表聚合查询原理图](/img/architecture/multi_tables.png) +![TDengine Database 多表聚合查询原理图](./multi_tables.webp)
图 5 多表聚合查询原理图
diff --git a/docs-cn/21-tdinternal/02-replica.md b/docs-cn/21-tdinternal/02-replica.md index 6a384b982d22956dd514d8df05dc827ca6f8b729..25d1edab6e9b97be13c8675491cc90ed54520865 100644 --- a/docs-cn/21-tdinternal/02-replica.md +++ b/docs-cn/21-tdinternal/02-replica.md @@ -93,7 +93,7 @@ TDengine采取的是Master-Slave模式进行同步,与流行的RAFT一致性 具体的流程图如下: -![replica-master.png](/img/architecture/replica-master.png) +![TDengine Database replica master](./replica-master.webp) 选择Master的具体规则如下: @@ -108,7 +108,7 @@ TDengine采取的是Master-Slave模式进行同步,与流行的RAFT一致性 如果vnode A是master, vnode B是slave, vnode A能接受客户端的写请求,而vnode B不能。当vnode A收到写的请求后,遵循下面的流程: -![replica-forward.png](/img/architecture/replica-forward.png) +![TDengine Database replica forward](./replica-forward.webp) 1. 应用对写请求做基本的合法性检查,通过,则给该请求包打上一个版本号(version, 单调递增) 2. 应用将打上版本号的写请求封装一个WAL Head, 写入WAL(Write Ahead Log) @@ -143,7 +143,7 @@ TDengine采取的是Master-Slave模式进行同步,与流行的RAFT一致性 整个数据恢复流程分为两大步骤,第一步,先恢复archived data(file), 然后恢复wal。具体流程如下: -![replica-restore.png](/img/architecture/replica-restore.png) +![TDengine Database replica restore](./replica-restore.webp) 1. 通过已经建立的TCP连接,发送sync req给master节点 2. master收到sync req后,以client的身份,向vnode B主动建立一新的专用于同步的TCP连接(syncFd) diff --git a/docs-cn/21-tdinternal/03-taosd.md b/docs-cn/21-tdinternal/03-taosd.md index 6a5734102c85db291339ce93a2231cb8196053f6..0cf0a1aaa222e82f7ca6cc4f0314aa5a50442924 100644 --- a/docs-cn/21-tdinternal/03-taosd.md +++ b/docs-cn/21-tdinternal/03-taosd.md @@ -9,7 +9,7 @@ title: taosd的设计 taosd 包含 rpc,dnode,vnode,tsdb,query,cq,sync,wal,mnode,http,monitor 等模块,具体如下图: -![modules.png](/img/architecture/modules.png) +![TDengine Database module](./modules.webp) taosd 的启动入口是 dnode 模块,dnode 然后启动其他模块,包括可选配置的 http,monitor 模块。taosc 或 dnode 之间交互的消息都是通过 rpc 模块进行,dnode 模块根据接收到的消息类型,将消息分发到 vnode 或 mnode 的消息队列,或由 dnode 模块自己消费。dnode 的工作线程(worker)消费消息队列里的消息,交给 mnode 或 vnode 进行处理。下面对各个模块做简要说明。 @@ -44,13 +44,13 @@ RPC 模块还提供数据压缩功能,如果数据包的字节数超过系统 taosd 的消息消费由 dnode 通过读写线程池进行控制,是系统的中枢。该模块内的结构体图如下: -![dnode.png](/img/architecture/dnode.png) +![TDengine Database dnode](./dnode.webp) ## VNODE 模块 vnode 是一独立的数据存储查询逻辑单元,但因为一个 vnode 只能容许一个 DB ,因此 vnode 内部没有 account,DB,user 等概念。为实现更好的模块化、封装以及未来的扩展,它有很多子模块,包括负责存储的 TSDB,负责查询的 query,负责数据复制的 sync,负责数据库日志的的 WAL,负责连续查询的 cq(continuous query),负责事件触发的流计算的 event 等模块,这些子模块只与 vnode 模块发生关系,与其他模块没有任何调用关系。模块图如下: -![vnode.png](/img/architecture/vnode.png) +![TDengine Database vnode](./vnode.webp) vnode 模块向下,与 dnodeVRead,dnodeVWrite 发生互动,向上,与子模块发生互动。它主要的功能有: diff --git a/docs-cn/21-tdinternal/dnode.webp b/docs-cn/21-tdinternal/dnode.webp new file mode 100644 index 0000000000000000000000000000000000000000..a56c7e4594df00a721cb48381d68ca3bc813cdc8 Binary files /dev/null and b/docs-cn/21-tdinternal/dnode.webp differ diff --git a/docs-cn/21-tdinternal/message.webp b/docs-cn/21-tdinternal/message.webp new file mode 100644 index 0000000000000000000000000000000000000000..a2a42abff3d6e932b41a3abe9feae4a5cc13c9e5 Binary files /dev/null and b/docs-cn/21-tdinternal/message.webp differ diff --git a/docs-cn/21-tdinternal/modules.webp b/docs-cn/21-tdinternal/modules.webp new file mode 100644 index 0000000000000000000000000000000000000000..718a6abccdbe40d4a0df5e3812fe0ab943a7c523 Binary files /dev/null and b/docs-cn/21-tdinternal/modules.webp differ diff --git a/docs-cn/21-tdinternal/multi_tables.webp b/docs-cn/21-tdinternal/multi_tables.webp new file mode 100644 index 0000000000000000000000000000000000000000..8f649e34a3a62d1b11b4403b2e743ff6b5e47be2 Binary files /dev/null and b/docs-cn/21-tdinternal/multi_tables.webp 
differ diff --git a/docs-cn/21-tdinternal/replica-forward.webp b/docs-cn/21-tdinternal/replica-forward.webp new file mode 100644 index 0000000000000000000000000000000000000000..512efd4eba8f23ad0f8607eaaf5525f51ecdcf0e Binary files /dev/null and b/docs-cn/21-tdinternal/replica-forward.webp differ diff --git a/docs-cn/21-tdinternal/replica-master.webp b/docs-cn/21-tdinternal/replica-master.webp new file mode 100644 index 0000000000000000000000000000000000000000..57030a11f563af2689dbcfd206183f410b121aee Binary files /dev/null and b/docs-cn/21-tdinternal/replica-master.webp differ diff --git a/docs-cn/21-tdinternal/replica-restore.webp b/docs-cn/21-tdinternal/replica-restore.webp new file mode 100644 index 0000000000000000000000000000000000000000..f282c2d4d23f517e3ef08e906cea7e9c5edc0b2a Binary files /dev/null and b/docs-cn/21-tdinternal/replica-restore.webp differ diff --git a/docs-cn/21-tdinternal/structure.webp b/docs-cn/21-tdinternal/structure.webp new file mode 100644 index 0000000000000000000000000000000000000000..b77a42c074b15302b5c3ab889fb550a46dd549b3 Binary files /dev/null and b/docs-cn/21-tdinternal/structure.webp differ diff --git a/docs-cn/21-tdinternal/vnode.webp b/docs-cn/21-tdinternal/vnode.webp new file mode 100644 index 0000000000000000000000000000000000000000..fae3104c89c542c26790b509d12ad56661082c32 Binary files /dev/null and b/docs-cn/21-tdinternal/vnode.webp differ diff --git a/docs-cn/21-tdinternal/write_master.webp b/docs-cn/21-tdinternal/write_master.webp new file mode 100644 index 0000000000000000000000000000000000000000..9624036ed3d46ed60924ead9ce5c61acee0f4652 Binary files /dev/null and b/docs-cn/21-tdinternal/write_master.webp differ diff --git a/docs-cn/21-tdinternal/write_slave.webp b/docs-cn/21-tdinternal/write_slave.webp new file mode 100644 index 0000000000000000000000000000000000000000..7c45dec11b00e6a738de458f9e1bedacfad75a96 Binary files /dev/null and b/docs-cn/21-tdinternal/write_slave.webp differ diff --git a/docs-cn/25-application/01-telegraf.md b/docs-cn/25-application/01-telegraf.md index f63a6701eed2b4c5b98f577d5b2867ae6dada387..95df8699ef85b02d6e9dba398c787644fc9089b2 100644 --- a/docs-cn/25-application/01-telegraf.md +++ b/docs-cn/25-application/01-telegraf.md @@ -16,7 +16,7 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如 本文介绍不需要写一行代码,通过简单修改几行配置文件,就可以快速搭建一个基于 TDengine + Telegraf + Grafana 的 IT 运维系统。架构如下图: -![IT-DevOps-Solutions-Telegraf.png](/img/IT-DevOps-Solutions-Telegraf.png) +![TDengine Database IT-DevOps-Solutions-Telegraf](./IT-DevOps-Solutions-Telegraf.webp) ## 安装步骤 @@ -75,7 +75,7 @@ sudo systemctl start telegraf 点击左侧齿轮图标并选择 `Plugins`,应该可以找到 TDengine data source 插件图标。 点击左侧加号图标并选择 `Import`,从 `https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json` 下载 dashboard JSON 文件后导入。之后可以看到如下界面的仪表盘: -![IT-DevOps-Solutions-telegraf-dashboard.png](/img/IT-DevOps-Solutions-telegraf-dashboard.png) +![TDengine Database IT-DevOps-Solutions-telegraf-dashboard](./IT-DevOps-Solutions-telegraf-dashboard.webp) ## 总结 diff --git a/docs-cn/25-application/02-collectd.md b/docs-cn/25-application/02-collectd.md index 5e6bc6577b2f4c8564e4533ced745d0b214ec748..78c61bb969092d7040ddcb3d02ce7bd29a784858 100644 --- a/docs-cn/25-application/02-collectd.md +++ b/docs-cn/25-application/02-collectd.md @@ -16,7 +16,7 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如 本文介绍不需要写一行代码,通过简单修改几行配置文件,就可以快速搭建一个基于 TDengine + collectd / statsD + Grafana 的 IT 运维系统。架构如下图: -![IT-DevOps-Solutions-Collectd-StatsD.png](/img/IT-DevOps-Solutions-Collectd-StatsD.png) 
+![TDengine Database IT-DevOps-Solutions-Collectd-StatsD](./IT-DevOps-Solutions-Collectd-StatsD.webp) ## 安装步骤 @@ -81,12 +81,12 @@ repeater 部分添加 { host:'', port: -<dependency> -  <groupId>com.taosdata.jdbc</groupId> -  <artifactId>taos-jdbcdriver</artifactId> -  <version>2.0.27</version> -</dependency> +【 v2.2.1.5以后版本 】在 Windows 10 环境下运行 TDengine 客户端命令行工具 taos 时,若无法正常输入、显示中文,可以对客户端 taos.cfg 做如下配置: + +``` +locale C +charset UTF-8 ``` -### 14. taos connect failed, reason: invalid timestamp +### 14. JDBC 报错: the executed SQL is not a DML or a DDL? + +请更新至最新的 JDBC 驱动,参考 [Java 连接器](/reference/connector/java) + +### 15. taos connect failed, reason: invalid timestamp 常见原因是服务器和客户端时间没有校准,可以通过和时间服务器同步的方式(Linux 下使用 ntpdate 命令,Windows 在系统时间设置中选择自动同步)校准。 -### 15. 表名显示不全 +### 16. 表名显示不全 由于 taos shell 在终端中显示宽度有限,有可能比较长的表名显示不全,如果按照显示的不全的表名进行相关操作会发生 Table does not exist 错误。解决方法可以是通过修改 taos.cfg 文件中的设置项 maxBinaryDisplayWidth, 或者直接输入命令 set max_binary_display_width 100。或者在命令结尾使用 \G 参数来调整结果的显示方式。 -### 16. 如何进行数据迁移? +### 17. 如何进行数据迁移? TDengine 是根据 hostname 唯一标志一台机器的,在数据文件从机器 A 移动到机器 B 时,注意如下两件事: @@ -156,7 +159,7 @@ TDengine 是根据 hostname 唯一标志一台机器的,在数据文件从机 - 2.0.7.0 及以后的版本,到/var/lib/taos/dnode 下,修复 dnodeEps.json 的 dnodeId 对应的 FQDN,重启。确保集群内所有机器的此文件是完全相同的。 - 1.x 和 2.x 版本的存储结构不兼容,需要使用迁移工具或者自己开发应用导出导入数据。 -### 17. 如何在命令行程序 taos 中临时调整日志级别 +### 18. 如何在命令行程序 taos 中临时调整日志级别 为了调试方便,从 2.0.16 版本开始,命令行程序 taos 新增了与日志记录相关的两条指令: @@ -177,7 +180,7 @@ ALTER LOCAL RESETLOG; -### 18. go 语言编写组件编译失败怎样解决? +### 19. go 语言编写组件编译失败怎样解决? TDengine 2.3.0.0 及之后的版本包含一个使用 go 语言开发的 taosAdapter 独立组件,需要单独运行,取代之前 taosd 内置的 httpd ,提供包含原 httpd 功能以及支持多种其他软件(Prometheus、Telegraf、collectd、StatsD 等)的数据接入功能。 使用最新 develop 分支代码编译需要先 `git submodule update --init --recursive` 下载 taosAdapter 仓库代码后再编译。 @@ -192,7 +195,7 @@ go env -w GOPROXY=https://goproxy.cn,direct 如果希望继续使用之前的内置 httpd,可以关闭 taosAdapter 编译,使用 `cmake .. -DBUILD_HTTP=true` 使用原来内置的 httpd。 -### 19. 如何查询数据占用的存储空间大小? +### 20. 如何查询数据占用的存储空间大小? 默认情况下,TDengine 的数据文件存储在 /var/lib/taos ,日志文件存储在 /var/log/taos 。 @@ -201,3 +204,33 @@ go env -w GOPROXY=https://goproxy.cn,direct 若想查看单个数据库占用的大小,可在命令行程序 taos 内指定要查看的数据库后执行 `show vgroups;` ,通过得到的 VGroup id 去 /var/lib/taos/vnode 下查看包含的文件夹大小。 若仅仅想查看指定(超级)表的数据块分布及大小,可查看[_block_dist 函数](https://docs.taosdata.com/taos-sql/select/#_block_dist-%E5%87%BD%E6%95%B0) + +### 21. 客户端连接串如何保证高可用? + +请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2021/04/16/2287.html) + +### 22. 时间戳的时区信息是怎样处理的? + +TDengine 中时间戳的时区总是由客户端进行处理,而与服务端无关。具体来说,客户端会对 SQL 语句中的时间戳进行时区转换,转为 UTC 时区(即 Unix 时间戳——Unix Timestamp)再交由服务端进行写入和查询;在读取数据时,服务端也是采用 UTC 时区提供原始数据,客户端收到后再根据本地设置,把时间戳转换为本地系统所要求的时区进行显示。 + +客户端在处理时间戳字符串时,会采取如下逻辑: + +1. 在未做特殊设置的情况下,客户端默认使用所在操作系统的时区设置。 +2. 如果在 taos.cfg 中设置了 timezone 参数,则客户端会以这个配置文件中的设置为准。 +3. 如果在 C/C++/Java/Python 等各种编程语言的 Connector Driver 中,在建立数据库连接时显式指定了 timezone,那么会以这个指定的时区设置为准。例如 Java Connector 的 JDBC URL 中就有 timezone 参数。 +4. 在书写 SQL 语句时,也可以直接使用 Unix 时间戳(例如 `1554984068000`)或带有时区的时间戳字符串,也即以 RFC 3339 格式(例如 `2013-04-12T15:52:01.123+08:00`)或 ISO-8601 格式(例如 `2013-04-12T15:52:01.123+0800`)来书写时间戳,此时这些时间戳的取值将不再受其他时区设置的影响。 + +### 23. TDengine 2.0 都会用到哪些网络端口? + +使用到的网络端口请看文档:[serverport](/reference/config/#serverport) + +需要注意,文档上列举的端口号都是以默认端口 6030 为前提进行说明,如果修改了配置文件中的设置,那么列举的端口都会随之出现变化,管理员可以参考上述的信息调整防火墙设置。 + +### 24. 为什么 RESTful 接口无响应、Grafana 无法添加 TDengine 为数据源、TDengineGUI 选了 6041 端口还是无法连接成功? 
taosAdapter 从 TDengine 2.4.0.0 版本开始成为 TDengine 服务端软件的组成部分,是 TDengine 集群和应用程序之间的桥梁和适配器。在此之前 RESTful 接口等功能是由 taosd 内置的 HTTP 服务提供的,而如今要实现上述功能需要执行:```systemctl start taosadapter``` 命令来启动 taosAdapter 服务。 + +需要说明的是,taosAdapter 的日志路径 path 需要单独配置,默认路径是 /var/log/taos ;日志等级 logLevel 有 8 个等级,默认等级是 info ,配置成 panic 可关闭日志输出。请注意操作系统 / 目录的空间大小,可通过命令行参数、环境变量或配置文件来修改配置,默认配置文件是 /etc/taos/taosadapter.toml 。 + +有关 taosAdapter 组件的详细介绍请看文档:[taosAdapter](https://docs.taosdata.com/reference/taosadapter/) + diff --git a/docs-cn/27-train-faq/03-docker.md b/docs-cn/27-train-faq/03-docker.md index 845a8751846c0995a43fb1c01e6ace3080176838..7791569b25e102b4634f0fb899fc0973cacc0aa1 100644 --- a/docs-cn/27-train-faq/03-docker.md +++ b/docs-cn/27-train-faq/03-docker.md @@ -209,7 +209,7 @@ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' 127.0.0 Press enter key to continue or Ctrl-C to stop ``` - 回车后,该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "beijing" 或者 "shanghai"。 + 回车后,该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "California.SanFrancisco" 或者 "California.SanDiego"。 最后共插入 1 亿条记录。 @@ -279,7 +279,7 @@ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' 127.0.0 $ taos> select groupid, location from test.d0; groupid | location | ================================= - 0 | shanghai | + 0 | California.SanDiego | Query OK, 1 row(s) in set (0.003490s) ``` diff --git a/docs-cn/eco_system.webp b/docs-cn/eco_system.webp new file mode 100644 index 0000000000000000000000000000000000000000..d60c38e97c67fa7b2acc703b2ba777d19ae5be13 Binary files /dev/null and b/docs-cn/eco_system.webp differ diff --git a/docs-en/01-index.md b/docs-en/01-index.md new file mode 100644 index 0000000000000000000000000000000000000000..d76c12e10fce24dff9f916945f5b6236857ebb8d --- /dev/null +++ b/docs-en/01-index.md @@ -0,0 +1,27 @@ +--- +title: TDengine Documentation +sidebar_label: Documentation Home +slug: / +--- + +TDengine is a [high-performance](https://tdengine.com/fast), [scalable](https://tdengine.com/scalable) time series database with [SQL support](https://tdengine.com/sql-support). This document is the TDengine user manual. It introduces the basic, as well as novel, concepts in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design and other topics. It’s written mainly for architects, developers and system administrators. + +To get a global view about TDengine, like feature list, benchmarks, and competitive advantages, please browse through section [Introduction](./intro). + +TDengine greatly improves the efficiency of data ingestion, querying and storage by exploiting the characteristics of time series data, introducing the novel concepts of "one table for one data collection point" and "super table", and designing an innovative storage engine. To understand the new concepts in TDengine and make full use of the features and capabilities of TDengine, please read [“Concepts”](./concept) thoroughly. + +If you are a developer, please read the [“Developer Guide”](./develop) carefully. 
This section introduces the database connection, data modeling, data ingestion, query, continuous query, cache, data subscription, user-defined functions, and other functionality in detail. Sample code is provided for a variety of programming languages. In most cases, you can just copy and paste the sample code, make a few changes to accommodate your application, and it will work. + +We live in the era of big data, and scale-up is unable to meet the growing business needs. Any modern data system must have the ability to scale out, and clustering has become an indispensable feature of big data systems. Not only did the TDengine team develop the cluster feature, but it also decided to open source this important feature. To learn how to deploy, manage and maintain a TDengine cluster, please refer to ["cluster"](./cluster). + +TDengine uses ubiquitous SQL as its query language, which greatly reduces learning costs and migration costs. In addition to the standard SQL, TDengine has extensions to better support time series data analysis. These extensions include functions such as roll up, interpolation and time weighted average, among many others. The ["SQL Reference"](./taos-sql) chapter describes the SQL syntax in detail, and lists the various supported commands and functions. + +If you are a system administrator who cares about installation, upgrade, fault tolerance, disaster recovery, data import, data export, system configuration, how to monitor whether TDengine is running healthily, and how to improve system performance, please refer to and thoroughly read the ["Administration"](./operation) section. + +If you want to know more about TDengine tools, the REST API, and connectors for various programming languages, please see the ["Reference"](./reference) chapter. + +If you are very interested in the internal design of TDengine, please read the chapter [“Inside TDengine”](./tdinternal), which introduces the cluster design, data partitioning, sharding, writing, and reading processes in detail. If you want to study TDengine code or even contribute code, please read this chapter carefully. + +TDengine is an open source database, and we would love for you to be a part of TDengine. If you find any errors in the documentation, or see parts where more clarity or elaboration is needed, please click "Edit this page" at the bottom of each page to edit it directly. + +Together, we make a difference. 
diff --git a/docs-en/01-intro/eco_system.png b/docs-en/01-intro/eco_system.png deleted file mode 100644 index bf8bf8f1e0a2311fc12202d712a8a2f9b8ce419b..0000000000000000000000000000000000000000 Binary files a/docs-en/01-intro/eco_system.png and /dev/null differ diff --git a/docs-en/01-intro/_category_.yml b/docs-en/02-intro/_category_.yml similarity index 100% rename from docs-en/01-intro/_category_.yml rename to docs-en/02-intro/_category_.yml diff --git a/docs-en/02-intro/eco_system.webp b/docs-en/02-intro/eco_system.webp new file mode 100644 index 0000000000000000000000000000000000000000..d60c38e97c67fa7b2acc703b2ba777d19ae5be13 Binary files /dev/null and b/docs-en/02-intro/eco_system.webp differ diff --git a/docs-en/01-intro/01-intro.md b/docs-en/02-intro/index.md similarity index 68% rename from docs-en/01-intro/01-intro.md rename to docs-en/02-intro/index.md index c3e86fcbee41aa847134958225b1b856354a2444..f6766f910f4d7560b782bf02ffa97922523e6167 100644 --- a/docs-en/01-intro/01-intro.md +++ b/docs-en/02-intro/index.md @@ -5,39 +5,39 @@ toc_max_heading_level: 2 TDengine is a high-performance, scalable time-series database with SQL support. Its code, including its cluster feature is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](/develop/cache), [stream processing](/develop/continuous-query), [data subscription](/develop/subscribe) and other functionalities to reduce the complexity and cost of development and operation. -This section introduces the major features, competitive advantages, suited scenarios and benchmarks to help you get a high level picture for TDengine. +This section introduces the major features, competitive advantages, typical use-cases and benchmarks to help you get a high level overview of TDengine. ## Major Features The major features are listed below: -1. Besides [using SQL to insert](/develop/insert-data/sql-writing),it supports [Schemaless writing](/reference/schemaless/),and it supports [InfluxDB LINE](/develop/insert-data/influxdb-line),[OpenTSDB Telnet](/develop/insert-data/opentsdb-telnet), [OpenTSDB JSON ](/develop/insert-data/opentsdb-json) and other protocols. -2. Support for seamless integration with third-party data collection agents like [Telegraf](/third-party/telegraf),[Prometheus](/third-party/prometheus),[StatsD](/third-party/statsd),[collectd](/third-party/collectd),[icinga2](/third-party/icinga2), [TCollector](/third-party/tcollector), [EMQX](/third-party/emq-broker), [HiveMQ](/third-party/hive-mq-broker). Without a line of code, those agents can write data points into TDengine just by configuration. -3. Support for [all kinds of queries](/develop/query-data), including aggregation, nested query, downsampling, interpolation, etc. -4. Support for [user defined functions](/develop/udf) +1. While TDengine supports [using SQL to insert](/develop/insert-data/sql-writing), it also supports [Schemaless writing](/reference/schemaless/) just like NoSQL databases. TDengine also supports standard protocols like [InfluxDB LINE](/develop/insert-data/influxdb-line),[OpenTSDB Telnet](/develop/insert-data/opentsdb-telnet), [OpenTSDB JSON ](/develop/insert-data/opentsdb-json) among others. +2. 
TDengine supports seamless integration with third-party data collection agents like [Telegraf](/third-party/telegraf),[Prometheus](/third-party/prometheus),[StatsD](/third-party/statsd),[collectd](/third-party/collectd),[icinga2](/third-party/icinga2), [TCollector](/third-party/tcollector), [EMQX](/third-party/emq-broker), [HiveMQ](/third-party/hive-mq-broker). These agents can write data into TDengine with simple configuration and without a single line of code. +3. Support for [all kinds of queries](/develop/query-data), including aggregation, nested query, downsampling, interpolation and others. +4. Support for [user defined functions](/develop/udf). 5. Support for [caching](/develop/cache). TDengine always saves the last data point in cache, so Redis is not needed in some scenarios. 6. Support for [continuous query](/develop/continuous-query). 7. Support for [data subscription](/develop/subscribe) with the capability to specify filter conditions. 8. Support for [cluster](/cluster/), with the capability of increasing processing power by adding more nodes. High availability is supported by replication. -9. Provides interactive [command-line intrerface](/reference/taos-shell) for management, maintainence and ad-hoc query. +9. Provides an interactive [command-line interface](/reference/taos-shell) for management, maintenance and ad-hoc queries. 10. Provides many ways to [import](/operation/import) and [export](/operation/export) data. -11. Provides [monitoring](/operation/monitor) on TDengine running instances. +11. Provides [monitoring](/operation/monitor) on running instances of TDengine. 12. Provides [connectors](/reference/connector/) for [C/C++](/reference/connector/cpp), [Java](/reference/connector/java), [Python](/reference/connector/python), [Go](/reference/connector/go), [Rust](/reference/connector/rust), [Node.js](/reference/connector/node) and other programming languages. 13. Provides a [REST API](/reference/rest-api/). -14. Supports the seamless integration with [Grafana](/third-party/grafana) for visualization. +14. Supports seamless integration with [Grafana](/third-party/grafana) for visualization. 15. Supports seamless integration with Google Data Studio. -For more detail on features, please read through the whole documentation. +For more details on features, please read through the entire documentation. ## Competitive Advantages -TDengine makes full use of [the characteristics of time series data](https://tdengine.com/2019/07/09/86.html), such as structured, no transaction, rarely delete or update, etc., and builds its own innovative storage engine and computing engine to differentiate itself from other time series databases with the following advantages. +Time-series data is structured, not transactional, and is rarely deleted or updated. TDengine makes full use of [these characteristics of time series data](https://tdengine.com/2019/07/09/86.html) to build its own innovative storage engine and computing engine to differentiate itself from other time series databases, with the following advantages. -- **High Performance**: TDengine outperforms other time series databases in data ingestion and querying while significantly reducing storage cost and compute costs, with an innovatively designed and purpose-built storage engine. 
+- **[High Performance](https://tdengine.com/fast)**: With an innovatively designed and purpose-built storage engine, TDengine outperforms other time series databases in data ingestion and querying while significantly reducing storage costs and compute costs. -- **Scalable**: TDengine provides out-of-box scalability and high-availability through its native distributed design. Nodes can be added through simple configuration to achieve greater data processing power. In addition, this feature is open source. +- **[Scalable](https://tdengine.com/scalable)**: TDengine provides out-of-box scalability and high-availability through its native distributed design. Nodes can be added through simple configuration to achieve greater data processing power. In addition, this feature is open source. -- **SQL Support**: TDengine uses SQL as the query language, thereby reducing learning and migration costs, while adding SQL extensions to handle time-series data better, and supporting convenient and flexible schemaless data ingestion. +- **[SQL Support](https://tdengine.com/sql-support)**: TDengine uses SQL as the query language, thereby reducing learning and migration costs, while adding SQL extensions to better handle time-series. Keeping NoSQL developers in mind, TDengine also supports convenient and flexible, schemaless data ingestion. - **All in One**: TDengine has built-in caching, stream processing and data subscription functions. It is no longer necessary to integrate Kafka/Redis/HBase/Spark or other software in some scenarios. It makes the system architecture much simpler, cost-effective and easier to maintain. @@ -45,24 +45,24 @@ TDengine makes full use of [the characteristics of time series data](https://tde - **Zero Management**: Installation and cluster setup can be done in seconds. Data partitioning and sharding are executed automatically. TDengine’s running status can be monitored via Grafana or other DevOps tools. -- **Zero Learning Costs**: With SQL as the query language and support for ubiquitous tools like Python, Java, C/C++, Go, Rust, and Node.js connectors, there are zero learning costs. +- **Zero Learning Costs**: With SQL as the query language and support for ubiquitous tools like Python, Java, C/C++, Go, Rust, and Node.js connectors, and a REST API, there are zero learning costs. -- **Interactive Console**: TDengine provides convenient console access to the database to run ad hoc queries, maintain the database, or manage the cluster without any programming. +- **Interactive Console**: TDengine provides convenient console access to the database, through a CLI, to run ad hoc queries, maintain the database, or manage the cluster, without any programming. -With TDengine, the total cost of ownership of time-seriess data platform can be greatly reduced. Because 1: with its superior performance, the computing and storage resources are reduced significantly; 2:with SQL support, it can be seamlessly integrated with many third party tools, and learning costs/migration costs are reduced significantly; 3: with its simple architecture and zero management, the operation and maintainence costs are reduced. +With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced. 
1: With its superior performance, the computing and storage resources are reduced significantly. 2: With SQL support, it can be seamlessly integrated with many third-party tools, and learning costs/migration costs are reduced significantly. 3: With its simple architecture and zero management, the operation and maintenance costs are reduced. ## Technical Ecosystem -In the time-series data processing platform, TDengine stands in a role like this diagram below: +This is how TDengine would be situated in a typical time-series data processing platform: -![TDengine Technical Ecosystem ](eco_system.png) +![TDengine Database Technical Ecosystem ](eco_system.webp)
Figure 1. TDengine Technical Ecosystem
-On the left side, there are data collection agents like OPC-UA, MQTT, Telegraf and Kafka. On the right side, visualization/BI tools, HMI, Python/R, and IoT Apps can be connected. TDengine itself provides interactive command-line interface and web interface for management and maintainence. +On the left-hand side, there are data collection agents like OPC-UA, MQTT, Telegraf and Kafka. On the right-hand side, visualization/BI tools, HMI, Python/R, and IoT Apps can be connected. TDengine itself provides an interactive command-line interface and a web interface for management and maintenance. -## Suited Scenarios +## Typical Use Cases -As a high-performance, scalable and SQL supported time-series database, TDengine's typical application scenarios include but are not limited to IoT, Industrial Internet, Connected Vehicles, IT operation and maintenance, energy, financial markets and other fields. TDengine is a purpose-built database optimized for the characteristics of time series data, it cannot be used to process data from web crawlers, social media, e-commerce, ERP, CRM, etc. This section makes a more detailed analysis of the applicable scenarios. +As a high-performance, scalable and SQL supported time-series database, TDengine's typical use cases include but are not limited to IoT, Industrial Internet, Connected Vehicles, IT operation and maintenance, energy, financial markets and other fields. TDengine is a purpose-built database optimized for the characteristics of time series data. As such, it cannot be used to process data from web crawlers, social media, e-commerce, ERP, CRM and so on. More generally, TDengine is not a suitable storage engine for non-time-series data. This section makes a more detailed analysis of the applicable scenarios. ### Characteristics and Requirements of Data Sources @@ -103,7 +103,7 @@ As a high-performance, scalable and SQL supported time-series database, TDengine | Minimize learning and maintenance costs | | | √ | In addition to being easily configurable, standard SQL support and the Taos shell for ad hoc queries makes maintenance simpler, allows reuse and reduces learning costs.| | Abundant talent supply | √ | | | Given the above, and given the extensive training and professional services provided by TDengine, it is easy to migrate from existing solutions or create a new and lasting solution based on TDengine.| -## Comparision with other databases +## Comparison with other databases - [Writing Performance Comparison of TDengine and InfluxDB ](https://tdengine.com/2022/02/23/4975.html) - [Query Performance Comparison of TDengine and InfluxDB](https://tdengine.com/2022/02/24/5120.html) diff --git a/docs-en/03-get-started/_pkg_install.mdx b/docs-en/03-get-started/_pkg_install.mdx deleted file mode 100644 index 83c987af8bcf24a9593105b680d32a0421344d5f..0000000000000000000000000000000000000000 --- a/docs-en/03-get-started/_pkg_install.mdx +++ /dev/null @@ -1,17 +0,0 @@ -import PkgList from "/components/PkgList"; -TDengine 的安装非常简单,从下载到安装成功仅仅只要几秒钟。 -为方便使用,从 2.4.0.10 开始,标准的服务端安装包包含了 taos、taosd、taosAdapter、taosdump、taosBenchmark、TDinsight 安装脚本和示例代码;如果您只需要用到服务端程序和客户端连接的 C/C++ 语言支持,也可以仅下载 lite 版本的安装包。 -在安装包格式上,我们提供 tar.gz, rpm 和 deb 格式,为企业客户提供 tar.gz 格式安装包,以方便在特定操作系统上使用。需要注意的是,rpm 和 deb 包不含 taosdump、taosBenchmark 和 TDinsight 安装脚本,这些工具需要通过安装 taosTool 包获得。 -发布版本包括稳定版和 Beta 版,Beta 版含有更多新功能。正式上线或测试建议安装稳定版。您可以根据需要选择下载: - - -具体的安装方法,请参见[安装包的安装和卸载](/operation/pkg-install)。 -下载其他组件、最新 Beta 版及之前版本的安装包,请点击[这里](https://www.taosdata.com/all-downloads) -查看 Release
Notes, 请点击[这里](https://github.com/taosdata/TDengine/releases) diff --git a/docs-en/02-concept/_category_.yml b/docs-en/04-concept/_category_.yml similarity index 100% rename from docs-en/02-concept/_category_.yml rename to docs-en/04-concept/_category_.yml diff --git a/docs-en/02-concept/02-concept.md b/docs-en/04-concept/index.md similarity index 78% rename from docs-en/02-concept/02-concept.md rename to docs-en/04-concept/index.md index f71674fc0ddc483c1c3371e56bdf17c39506f985..850f705146c4829db579f14be1a686ef9052f678 100644 --- a/docs-en/02-concept/02-concept.md +++ b/docs-en/04-concept/index.md @@ -2,7 +2,7 @@ title: Concepts --- -In order to explain the basic concepts and provide some sample code, the TDengine documentation takes smart meters as a typical time series data scenario. Assuming that each smart meter collects three metrics of current, voltage, and phase, there are multiple smart meters, and each meter has static attributes like location and group ID, the collected data will be similar to the following table: +In order to explain the basic concepts and provide some sample code, the TDengine documentation takes smart meters as a typical time series use case. We assume the following: 1. Each smart meter collects three metrics, i.e. current, voltage, and phase; 2. there are multiple smart meters; and 3. each meter has static attributes like location and group ID. Based on this, the collected data will look similar to the following table:
@@ -29,7 +29,7 @@ In order to explain the basic concepts and provide some sample code, the TDengin [eight single-line hunks covering lines 29-92 of the HTML sample-data table follow here; the <td> markup did not survive extraction, so only the hunk placement is recoverable] @@ -112,7 +112,7 @@ Label/Tag refers to the static properties of sensors, equipment or other types o ## Data Collection Point -Data Collection Point (DCP) refers to hardware or software that collects metrics based on preset time periods or triggered by events. A data collection point can collect one or multiple metrics, but these metrics are collected at the same time and have the same time stamp. For some complex equipments, there are often multiple data collection points, and the sampling rate of each collection point may be different, and fully independent. For example, for a car, there could be a data collection point to collect GPS position metrics, a data collection point to collect engine status metrics, and a data collection point to collect the environment metrics inside the car, so in this example the car would have three data collection points. +Data Collection Point (DCP) refers to hardware or software that collects metrics based on preset time periods or triggered by events. A data collection point can collect one or multiple metrics, but these metrics are collected at the same time and have the same time stamp. For some complex equipment, there are often multiple data collection points, and the sampling rate of each collection point may be different, and fully independent. For example, for a car, there could be a data collection point to collect GPS position metrics, a data collection point to collect engine status metrics, and a data collection point to collect the environment metrics inside the car. So in this example the car would have three data collection points. ## Table @@ -122,10 +122,10 @@ To make full use of time-series data characteristics, TDengine adopts a strategy 1. Since the metric data from different DCP are fully independent, the data source of each DCP is unique, and a table has only one writer. In this way, data points can be written in a lock-free manner, and the writing speed can be greatly improved. 2. For a DCP, the metric data generated by DCP is ordered by timestamp, so the write operation can be implemented by simple appending, which further greatly improves the data writing speed. -3. The metric data from a DCP is continuously stored in block by block. If you read data for a period of time, it can greatly reduce random read operations and improve read and query performance by orders of magnitude. -4. Inside a data block for a DCP, columnar storage is used, and different compression algorithms are used for different data types. Metrics generally don't vary as significantly between themselves over a time range as compared to other metrics, this allows for a higher compression rate. +3.
The metric data from a DCP is continuously stored, block by block. If you read data for a period of time, it can greatly reduce random read operations and improve read and query performance by orders of magnitude. +4. Inside a data block for a DCP, columnar storage is used, and different compression algorithms are used for different data types. Metrics generally don't vary as significantly between themselves over a time range as compared to other metrics, which allows for a higher compression rate. -If the metric data of multiple DCPs are traditionally written into a single table, due to the uncontrollable network delay, the timing of the data from different DCPs arriving at the server cannot be guaranteed, the writing operation must be protected by locks, and the metric data from one DCP cannot be guaranteed to be continuously stored together. **One table for one data collection point can ensure the best performance of insert and query of a single data collection point to the greatest extent.** +If the metric data of multiple DCPs are traditionally written into a single table, due to uncontrollable network delays, the timing of the data from different DCPs arriving at the server cannot be guaranteed, write operations must be protected by locks, and metric data from one DCP cannot be guaranteed to be continuously stored together. **One table for one data collection point can ensure the best performance of insert and query of a single data collection point to the greatest possible extent.** TDengine suggests using DCP ID as the table name (like D1001 in the above table). Each DCP may collect one or multiple metrics (like the current, voltage, phase as above). Each metric has a corresponding column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the time stamp as the index, and won’t build the index on any metrics stored. Column wise storage is used. @@ -139,7 +139,7 @@ In the design of TDengine, **a table is used to represent a specific data collec ## Subtable -When creating a table for a specific data collection point, the user can use a STable as a template and specifies the tag values of this specific DCP to create it. **The table created by using a STable as the template is called subtable** in TDengine. The difference between regular table and subtable is: +When creating a table for a specific data collection point, the user can use a STable as a template and specify the tag values of this specific DCP to create it. **The table created by using a STable as the template is called subtable** in TDengine. The difference between regular table and subtable is: 1. Subtable is a table, all SQL commands applied on a regular table can be applied on subtable. 2. Subtable is a table with extensions, it has static tags (labels), and these tags can be added, deleted, and updated after it is created. But a regular table does not have tags. 3. A subtable belongs to only one STable, but a STable may have many subtables. Regular tables do not belong to a STable. @@ -151,9 +151,9 @@ The relationship between a STable and the subtables created based on this STable 2. The schema of metrics or labels cannot be adjusted through subtables, and it can only be changed via STable. Changes to the schema of a STable takes effect immediately for all associated subtables. 3. STable defines only one template and does not store any data or label information by itself. 
Therefore, data cannot be written to a STable, only to subtables. -Queries can be executed on both a table (subtable) and a STable. For a query on a STable, TDengine will treat the data in all its subtables as a whole data set for processing. TDengine will first find the subtables that meet the tag filter conditions, then scan the time-series data of these subtables to perform aggregation operation, which can greatly reduce the data sets to be scanned, thus greatly improving the performance of data aggregation across multiple DCPs. +Queries can be executed on both a table (subtable) and a STable. For a query on a STable, TDengine will treat the data in all its subtables as a whole data set for processing. TDengine will first find the subtables that meet the tag filter conditions, then scan the time-series data of these subtables to perform aggregation operation, which reduces the number of data sets to be scanned and in turn greatly improves the performance of data aggregation across multiple DCPs. -In TDengine, it is recommended to use a substable instead of a regular table for a DCP. +In TDengine, it is recommended to use a subtable instead of a regular table for a DCP. ## Database @@ -167,4 +167,4 @@ FQDN (Fully Qualified Domain Name) is the full domain name of a specific compute Each node of a TDengine cluster is uniquely identified by an End Point, which consists of an FQDN and a Port, such as h1.tdengine.com:6030. In this way, when the IP changes, we can still use the FQDN to dynamically find the node without changing any configuration of the cluster. In addition, FQDN is used to facilitate unified access to the same cluster from the Intranet and the Internet. -TDengine does not recommend using an IP address to access the cluster, FQDN is recommended for cluster management. +TDengine does not recommend using an IP address to access the cluster. FQDN is recommended for cluster management. diff --git a/docs-en/04-develop/01-connect/_connect_python.mdx b/docs-en/04-develop/01-connect/_connect_python.mdx deleted file mode 100644 index f6c8bcfee1d92fae2d1ad320002b805dd9951228..0000000000000000000000000000000000000000 --- a/docs-en/04-develop/01-connect/_connect_python.mdx +++ /dev/null @@ -1,3 +0,0 @@ -```python title="Native Connection" -{{#include docs-examples/python/connect_exmaple.py}} -``` diff --git a/docs-en/04-develop/02-model/index.mdx b/docs-en/04-develop/02-model/index.mdx deleted file mode 100644 index 2bd6f0cbd9f1c5b62a3f14f03c93c825f0a8cdaf..0000000000000000000000000000000000000000 --- a/docs-en/04-develop/02-model/index.mdx +++ /dev/null @@ -1,84 +0,0 @@ ---- -title: Data Model ---- - -The data model employed by TDengine is similar to relational database, you need to create databases and tables. For a specific application, the design of databases, STables (abbreviated for super table), and tables need to be considered. This chapter will explain the big picture without syntax details. - -## Create Database - -The characteristics of data from different data collection points may be different, such as collection frequency, days to keep, number of replicas, data block size, whether it's allowed to update data, etc. For TDengine to operate with the best performance, it's strongly suggested to put the data with different characteristics into different databases because different storage policy can be set for each database.
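To make this guidance concrete, here is a minimal sketch using the native C connector, assuming a local TDengine 2.x server with the default root/taosdata credentials; the database names and the KEEP/REPLICA values are invented for illustration, and the full parameter list is discussed right after this sketch.

```c
// Minimal sketch: separate databases for data with different characteristics.
// Server address, credentials, database names and parameter values are
// assumptions for illustration only.
#include <stdio.h>
#include <taos.h>

static void exec_sql(TAOS *conn, const char *sql) {
  TAOS_RES *res = taos_query(conn, sql);
  if (taos_errno(res) != 0) {
    fprintf(stderr, "\"%s\" failed: %s\n", sql, taos_errstr(res));
  }
  taos_free_result(res);
}

int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  if (conn == NULL) {
    fprintf(stderr, "failed to connect to TDengine\n");
    return 1;
  }
  // High-frequency metrics with a short retention period.
  exec_sql(conn, "CREATE DATABASE IF NOT EXISTS metrics_hot KEEP 30");
  // Slowly changing data kept for five years, with three replicas.
  exec_sql(conn, "CREATE DATABASE IF NOT EXISTS metrics_cold KEEP 1825 REPLICA 3");
  taos_close(conn);
  return 0;
}
```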
When creating a database, there are a lot of parameters that can be configured, such as the days to keep data, the number of replicas, the number of memory blocks, time precision, the minimum and maximum number of rows in each data block, compress or not, the time range of the data in single data file, etc. Below is an example of the SQL statement for creating a database. - -```sql -CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 6 UPDATE 1; -``` - -In the above SQL statement, a database named "power" will be created, the data in it will be kept for 365 days, which means the data older than 365 days will be deleted automatically, a new data file will be created every 10 days, the number of memory blocks is 6, data is allowed to be updated. For more details please refer to [Database](/taos-sql/database). - -After creating a database, the current database in use can be switched using SQL command `USE`, for example below SQL statement switches the current database to `power`. Without current database specified, table name must be preceded with the corresponding database name. - -```sql -USE power; -``` - -:::note - -- Any table or STable must belong to a database. To create a table or STable, the database it belongs to must be ready. -- JOIN operation can't be performed tables from two different databases. -- Timestamp needs to be specified when inserting rows or querying historical rows. - -::: - -## Create STable - -In a time-series application, there may be multiple kinds of data collection points. For example, in the electrical power system there are meters, transformers, bus bars, switches, etc. For easy and efficient aggregation of multiple tables, one STable needs to be created for each kind of data collection point. For example, for the meters in [table 1](/tdinternal/arch#model_table1), below SQL statement can be used to create the super table. - -```sql -CREATE STable meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int); -``` - -:::note -If you are using versions prior to 2.0.15, the `STable` keyword needs to be replaced with `TABLE`. - -::: - -Similar to creating a regular table, when creating a STable, name and schema need to be provided too. In the STable schema, the first column must be timestamp (like ts in the example), and other columns (like current, voltage and phase in the example) are the data collected. The type of a column can be integer, float, double, string ,etc. Besides, the schema for tags need to be provided, like location and groupId in the example. The type of a tag can be integer, float, string, etc. The static properties of a data collection point can be defined as tags, like the location, device type, device group ID, manager ID, etc. Tags in the schema can be added, removed or updated. Please refer to [STable](/taos-sql/stable) for more details. - -For each kind of data collection points, a corresponding STable must be created. There may be man y STables in an application. For electrical power system, we need to create a STable respectively for meters, transformers, busbars, switches. There may be multiple kinds of data collection points on a single device, for example there may be one data collection point for electrical data like current and voltage and another point for environmental data like temperature, humidity and wind direction, multiple STables are required for such kind of device. - -At most 4096 (or 1024 prior to version 2.1.7.0) columns are allowed in a STable. 
If there are more than 4096 of metrics to bo collected for a data collection point, multiple STables are required for such kind of data collection point. There can be multiple databases in system, while one or more STables can exist in a database. - -## Create Table - -A specific table needs to be created for each data collection point. Similar to RDBMS, table name and schema are required to create a table. Beside, one or more tags can be created for each table. To create a table, a STable needs to be used as template and the values need to be specified for the tags. For example, for the meters in [Table 1](/tdinternal/arch#model_table1), the table can be created using below SQL statement. - -```sql -CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2); -``` - -In the above SQL statement, "d1001" is the table name, "meters" is the STable name, followed by the value of tag "Location" and the value of tag "groupId", which are "Beijing.Chaoyang" and "2" respectively in the example. The tag values can be updated after the table is created. Please refer to [Tables](/taos-sql/table) for details. - -In TDengine system, it's recommended to create a table for a data collection point via STable. Table created via STable is called subtable in some parts of TDengine document. All SQL commands applied on regular table can be applied on subtable. - -:::warning -It's not recommended to create a table in a database while using a STable from another database as template. - -:::tip -It's suggested to use the global unique ID of a data collection point as the table name, for example the device serial number. If there isn't such a unique ID, multiple IDs that are not global unique can be combined to form a global unique ID. It's not recommended to use a global unique ID as tag value. - -## Create Table Automatically - -In some circumstances, it's not sure whether the table already exists when inserting rows. The table can be created automatically using the SQL statement below, and nothing will happen if the table already exist. - -```sql -INSERT INTO d1001 USING meters TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32); -``` - -In the above SQL statement, a row with value `(now, 10.2, 219, 0.32)` will be inserted into table "d1001". If table "d1001" doesn't exist, it will be created automatically using STable "meters" as template with tag value `"Beijing.Chaoyang", 2`. - -For more details please refer to [Create Table Automatically](/taos-sql/insert#automatically-create-table-when-inserting). - -## Single Column vs Multiple Column - -Multiple columns data model is supported in TDengine. As long as multiple metrics are collected by same data collection point at same time, i.e. the timestamp are identical, these metrics can be put in single stable as columns. However, there is another kind of design, i.e. single column data model, a table is created for each metric, which means a STable is required for each kind of metric. For example, 3 STables are required for current, voltage and phase. - -It's recommended to use multiple column data model as much as possible because it's better in the performance of inserting or querying rows. In some cases, however, the metrics to be collected vary frequently and correspondingly the STable schema needs to be changed frequently too. In such case, it's more convenient to use single column data model. 
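To contrast the two schema designs just described, below is a hedged C sketch; the STable names with the `_only` suffix are invented for illustration, and the connection parameters and the `power` database are assumptions. The first statement models the multi-column approach, the remaining three model the single-column approach.

```c
// Hedged sketch contrasting the multi-column and single-column designs.
// Names with the _only suffix are invented; "power" and the connection
// parameters are assumptions.
#include <stdio.h>
#include <taos.h>

static void exec_sql(TAOS *conn, const char *sql) {
  TAOS_RES *res = taos_query(conn, sql);
  if (taos_errno(res) != 0) {
    fprintf(stderr, "\"%s\" failed: %s\n", sql, taos_errstr(res));
  }
  taos_free_result(res);
}

int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", "power", 6030);
  if (conn == NULL) return 1;

  // Multi-column model: one STable holds all metrics sampled together.
  exec_sql(conn,
      "CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, "
      "voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)");

  // Single-column model: one STable per metric, three in total here.
  exec_sql(conn,
      "CREATE STABLE IF NOT EXISTS current_only (ts TIMESTAMP, current FLOAT) "
      "TAGS (location BINARY(64), groupId INT)");
  exec_sql(conn,
      "CREATE STABLE IF NOT EXISTS voltage_only (ts TIMESTAMP, voltage INT) "
      "TAGS (location BINARY(64), groupId INT)");
  exec_sql(conn,
      "CREATE STABLE IF NOT EXISTS phase_only (ts TIMESTAMP, phase FLOAT) "
      "TAGS (location BINARY(64), groupId INT)");

  taos_close(conn);
  return 0;
}
```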
diff --git a/docs-en/04-develop/03-insert-data/index.md b/docs-en/04-develop/03-insert-data/index.md deleted file mode 100644 index ee80d436f11f19b422df261845f1c209620251f2..0000000000000000000000000000000000000000 --- a/docs-en/04-develop/03-insert-data/index.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Insert ---- - -TDengine supports multiple protocols of inserting data, including SQL, InfluxDB Line protocol, OpenTSDB Telnet protocol, OpenTSDB JSON protocol. Data can be inserted row by row, or in batch. Data from one or more collecting points can be inserted simultaneously. In the meantime, data can be inserted with multiple threads, out of order data and historical data can be inserted too. InfluxDB Line protocol, OpenTSDB Telnet protocol and OpenTSDB JSON protocol are the 3 kinds of schemaless insert protocols supported by TDengine. It's not necessary to create stable and table in advance if using schemaless protocols, and the schemas can be adjusted automatically according to the data to be inserted. - -```mdx-code-block -import DocCardList from '@theme/DocCardList'; -import {useCurrentSidebarCategory} from '@docusaurus/theme-common'; - - -``` \ No newline at end of file diff --git a/docs-en/04-develop/04-query-data/_category_.yml b/docs-en/04-develop/04-query-data/_category_.yml deleted file mode 100644 index 5912a48fc31ed36235c0d34d8b0909bf3b518aaa..0000000000000000000000000000000000000000 --- a/docs-en/04-develop/04-query-data/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: Select Data diff --git a/docs-en/04-develop/05-continuous-query.mdx b/docs-en/04-develop/05-continuous-query.mdx deleted file mode 100644 index 97e32a17ff325a9f67ac0a732be3dd72ccca8888..0000000000000000000000000000000000000000 --- a/docs-en/04-develop/05-continuous-query.mdx +++ /dev/null @@ -1,83 +0,0 @@ ---- -sidebar_label: Continuous Query -description: "Continuous query is a query that's executed automatically according to predefined frequency to provide aggregate query capability by time window, it's actually a simplified time driven stream computing." -title: "Continuous Query" ---- - -Continuous query is a query that's executed automatically according to predefined frequency to provide aggregate query capability by time window, it's actually a simplified time driven stream computing. Continuous query can be performed on a table or STable in TDengine. The result of continuous query can be pushed to client or written back to TDengine. Each query is executed on a time window, which moves forward with time. The size of time window and the forward sliding time need to be specified with parameter `INTERVAL` and `SLIDING` respectively. - -Continuous query in TDengine is time driven, and can be defined using TAOS SQL directly without any extra operations. With continuous query, the result can be generated according to time window to achieve down sampling of original data. Once a continuous query is defined using TAOS SQL, the query is automatically executed at the end of each time window and the result is pushed back to client or written to TDengine. - -There are some differences between continuous query in TDengine and time window computation in stream computing: - -- The computation is performed and the result is returned in real time in stream computing, but the computation in continuous query is only started when a time window closes. For example, if the time window is 1 day, then the result will only be generated at 23:59:59. 
-- If a historical data row is written in to a time widow for which the computation has been finished, the computation will not be performed again and the result will not be pushed to client again either. If the result has been written into TDengine, there will be no update for the result. -- In continuous query, if the result is pushed to client, the client status is not cached on the server side and Exactly-once is not guaranteed by the server either. If the client program crashes, a new time window will be generated from the time where the continuous query is restarted. If the result is written into TDengine, the data written into TDengine can be guaranteed as valid and continuous. - -## Syntax - -```sql -[CREATE TABLE AS] SELECT select_expr [, select_expr ...] - FROM {tb_name_list} - [WHERE where_condition] - [INTERVAL(interval_val [, interval_offset]) [SLIDING sliding_val]] - -``` - -INTERVAL: The time window for which continuous query is performed - -SLIDING: The time step for which the time window moves forward each time - -## How to Use - -In this section the use case of meters will be used to introduce how to use continuous query. Assume the STable and sub tables have been created using below SQL statement. - -```sql -create table meters (ts timestamp, current float, voltage int, phase float) tags (location binary(64), groupId int); -create table D1001 using meters tags ("Beijing.Chaoyang", 2); -create table D1002 using meters tags ("Beijing.Haidian", 2); -``` - -The average voltage for each time window of one minute with 30 seconds as the length of moving forward can be retrieved using below SQL statement. - -```sql -select avg(voltage) from meters interval(1m) sliding(30s); -``` - -Whenever the above SQL statement is executed, all the existing data will be computed again. If the computation needs to be performed every 30 seconds automatically to compute on the data in the past one minute, the above SQL statement needs to be revised as below, in which `{startTime}` stands for the beginning timestamp in the latest time window. - -```sql -select avg(voltage) from meters where ts > {startTime} interval(1m) sliding(30s); -``` - -Another easier way for same purpose is prepend `create table {tableName} as` before the `select`. - -```sql -create table avg_vol as select avg(voltage) from meters interval(1m) sliding(30s); -``` - -A table named as `avg_vol` will be created automatically, then every 30 seconds the `select` statement will be executed automatically on the data in the past 1 minutes, i.e. the latest time window, and the result is written into table `avg_vol`. The client program just needs to query from table `avg_vol`. For example: - -```sql -taos> select * from avg_vol; - ts | avg_voltage_ | -=================================================== - 2020-07-29 13:37:30.000 | 222.0000000 | - 2020-07-29 13:38:00.000 | 221.3500000 | - 2020-07-29 13:38:30.000 | 220.1700000 | - 2020-07-29 13:39:00.000 | 223.0800000 | -``` - -Please be noted that the minimum allowed time window is 10 milliseconds, and no upper limit. - -Besides, it's allowed to specify the start and end time of continuous query. If the start time is not specified, the timestamp of the first original row will be considered as the start time; if the end time is not specified, the continuous will be performed infinitely, otherwise it will be terminated once the end time is reached. For example, the continuous query in below SQL statement will be started from now and terminated one hour later. 
- -```sql -create table avg_vol as select avg(voltage) from meters where ts > now and ts <= now + 1h interval(1m) sliding(30s); -``` - -`now` in above SQL statement stands for the time when the continuous query is created, not the time when the computation is actually performed. Besides, to avoid the trouble caused by the delay of original data as much as possible, the actual computation in continuous query is also started with a little delay. That means, once a time window closes, the computation is not started immediately. Normally, the result can only be available a little time later, normally within one minute, after the time window closes. - -## How to Manage - -`show streams` command can be used in TDengine CLI `taos` to show all the continuous queries in the system, and `kill stream` can be used to terminate a continuous query. diff --git a/docs-en/04-develop/06-subscribe.mdx b/docs-en/04-develop/06-subscribe.mdx deleted file mode 100644 index 45b13d94c45e62ea8efb6e45e798a71e8cb16cba..0000000000000000000000000000000000000000 --- a/docs-en/04-develop/06-subscribe.mdx +++ /dev/null @@ -1,257 +0,0 @@ ---- -sidebar_label: Subscription -description: "Lightweight service for data subscription and pushing, the time series data inserted into TDengine continuously can be pushed automatically to the subscribing clients." -title: Data Subscription ---- - -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; -import Java from "./_sub_java.mdx"; -import Python from "./_sub_python.mdx"; -import Go from "./_sub_go.mdx"; -import Rust from "./_sub_rust.mdx"; -import Node from "./_sub_node.mdx"; -import CSharp from "./_sub_cs.mdx"; -import CDemo from "./_sub_c.mdx"; - -## Introduction - -According to the time series nature of the data, data inserting in TDengine is similar to data publishing in message queues, they both can be considered as a new data record with timestamp is inserted into the system. Data is stored in ascending order of timestamp inside TDengine, so essentially each table in TDengine can be considered as a message queue. - -Lightweight service for data subscription and pushing is built in TDengine. With the API provided by TDengine, client programs can used `select` statement to subscribe the data from one or more tables. The subscription and and state maintenance is performed on the client side, the client programs polls the server to check whether there is new data, and if so the new data will be pushed back to the client side. If the client program is restarted, where to start for retrieving new data is up to the client side. - -There are 3 major APIs related to subscription provided in the TDengine client driver. - -```c -taos_subscribe -taos_consume -taos_unsubscribe -``` - -For more details about these API please refer to [C/C++ Connector](/reference/connector/cpp). Their usage will be introduced below using the use case of meters, in which the schema of STable and sub tables please refer to the previous section "continuous query". Full sample code can be found [here](https://github.com/taosdata/TDengine/blob/master/examples/c/subscribe.c). - -If we want to get notification and take some actions if the current exceeds a threshold, like 10A, from some meters, there are two ways: - -The first way is to query on each sub table and record the last timestamp matching the criteria, then after some time query on the data later than recorded timestamp and repeat this process. The SQL statements for this way are as below. 
- -```sql -select * from D1001 where ts > {last_timestamp1} and current > 10; -select * from D1002 where ts > {last_timestamp2} and current > 10; -... -``` - -The above way works, but the problem is that the number of `select` statements increases with the number of meters grows. Finally the performance of both client side and server side will be unacceptable once the number of meters grows to a big enough number. - -A better way is to query on the STable, only one `select` is enough regardless of the number of meters, like below: - -```sql -select * from meters where ts > {last_timestamp} and current > 10; -``` - -However, how to choose `last_timestamp` becomes a new problem if using this way. Firstly, the timestamp when the data is generated is different from the timestamp when the data is inserted into the database, sometimes the difference between them may be very big. Secondly, the time when the data from different meters may arrives at the database may be different too. If the timestamp of the "slowest" meter is used as `last_timestamp` in the query, the data from other meters may be selected repeatedly; but if the timestamp of the "fasted" meters is used as `last_timestamp`, some data from other meters may be missed. - -All the problems mentioned above can be resolved thoroughly using subscription provided by TDengine. - -The first step is to create subscription using `taos_subscribe`. - -```c -TAOS_SUB* tsub = NULL; -if (async) { -  // create an asynchronized subscription, the callback function will be called every 1s -  tsub = taos_subscribe(taos, restart, topic, sql, subscribe_callback, &blockFetch, 1000); -} else { -  // create an synchronized subscription, need to call 'taos_consume' manually -  tsub = taos_subscribe(taos, restart, topic, sql, NULL, NULL, 0); -} -``` - -The subscription in TDengine can be either synchronous or asynchronous. In the above sample code, the value of variable `async` is determined from the CLI input, then it's used to create either an async or sync subscription. Sync subscription means the client program needs to invoke `taos_consume` to retrieve data, and async subscription means another thread created by `taos_subscribe` internally invokes `taos_consume` to retrieve data and pass the data to `subscribe_callback` for processing, `subscribe_callback` is a call back function provided by the client program and it's suggested not to do time consuming operation in the call back function. - -The parameter `taos` is an established connection. There is nothing special in sync subscription mode. In async subscription, it should be exclusively by current thread, otherwise unpredictable error may occur. - -The parameter `sql` is a `select` statement in which `where` clause can be used to specify filter conditions. In our example, the data whose current exceeds 10A needs to be subscribed like below SQL statement: - -```sql -select * from meters where current > 10; -``` - -Please be noted that, all the data will be processed because no start time is specified. If only the data from one day ago needs to be processed, a time related condition can be added: - -```sql -select * from meters where ts > now - 1d and current > 10; -``` - -The parameter `topic` is the name of the subscription, it needs to be guaranteed unique in the client program, but it's not necessary to be globally unique because subscription is implemented in the APIs on client side. - -If the subscription named as `topic` doesn't exist, parameter `restart` would be ignored. 
If the subscription named as `topic` has been created before by the client program which then exited, when the client program is restarted to use this `topic`, parameter `restart` is used to determine retrieving data from beginning or from the last point where the subscription was broken. If the value of `restart` is **true** (i.e. a non-zero value), the data will be retrieved from beginning, or if it is **false** (i.e. zero), the data already consumed before will not be processed again. - -The last parameter of `taos_subscribe` is the polling interval in unit of millisecond. In sync mode, if the time difference between two continuous invocations to `taos_consume` is smaller than the interval specified by `taos_subscribe`, `taos_consume` would be blocked until the interval is reached. In async mode, this interval is the minimum interval between two invocations to the call back function. - -The last second parameter of `taos_subscribe` is used to pass arguments to the call back function. `taos_subscribe` doesn't process this parameter and simply passes it to the call back function. This parameter is simply ignored in sync mode. - -After a subscription is created, its data can be consumed and processed, below is the sample code of how to consume data in sync mode, in the else part if `if (async)`. - -```c -if (async) { -  getchar(); -} else while(1) { -  TAOS_RES* res = taos_consume(tsub); -  if (res == NULL) { -    printf("failed to consume data."); -    break; -  } else { -    print_result(res, blockFetch); -    getchar(); -  } -} -``` - -In the above sample code, there is an infinite loop, each time carriage return is entered `taos_consume` is invoked, the return value of `taos_consume` is the selected result set, exactly as the input of `taos_use_result`, in the above sample `print_result` is used instead to simplify the sample. Below is the implementation of `print_result`. - -```c -void print_result(TAOS_RES* res, int blockFetch) { -  TAOS_ROW row = NULL; -  int num_fields = taos_num_fields(res); -  TAOS_FIELD* fields = taos_fetch_fields(res); -  int nRows = 0; -  if (blockFetch) { -    nRows = taos_fetch_block(res, &row); -    for (int i = 0; i < nRows; i++) { -      char temp[256]; -      taos_print_row(temp, row + i, fields, num_fields); -      puts(temp); -    } -  } else { -    while ((row = taos_fetch_row(res))) { -      char temp[256]; -      taos_print_row(temp, row, fields, num_fields); -      puts(temp); -      nRows++; -    } -  } -  printf("%d rows consumed.\n", nRows); -} -``` - -In the above code `taos_print_row` is used to process the data consumed. All the matching rows will be printed. - -In async mode, the data consuming is simpler as below. - -```c -void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) { -  print_result(res, *(int*)param); -} -``` - -`taos_unsubscribe` can be invoked to terminate a subscription. - -```c -taos_unsubscribe(tsub, keep); -``` - -The second parameter `keep` is used to specify whether to keep the subscription progress on the client sde. If it is **false**, i.e. **0**, then subscription will be restarted from beginning regardless of the `restart` parameter's value in when `taos_subscribe` is invoked again. The subscription progress information is stored in _{DataDir}/subscribe/_ , under which there is a file with same name as `topic` for each subscription, the subscription will be restarted from beginning if the corresponding progress file is removed. 
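Pulling the pieces above together, here is a compact synchronous-mode sketch of the full subscription lifecycle (create, poll, unsubscribe), assuming the meters STable from the earlier sections; the topic name, the filter SQL and the loop bound are arbitrary choices for illustration.

```c
// Compact sketch of the synchronous subscription workflow described above.
// Connection parameters, the topic name and the loop bound are illustrative.
#include <stdio.h>
#include <taos.h>

int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", "power", 6030);
  if (conn == NULL) {
    fprintf(stderr, "failed to connect to TDengine\n");
    return 1;
  }

  // restart = 1: consume from the beginning; poll interval = 1000 ms.
  TAOS_SUB *tsub = taos_subscribe(conn, 1, "high-current",
                                  "select * from meters where current > 10;",
                                  NULL, NULL, 1000);
  if (tsub == NULL) {
    taos_close(conn);
    return 1;
  }

  for (int i = 0; i < 10; i++) {        // poll ten times, then stop
    TAOS_RES *res = taos_consume(tsub); // paced by the interval in sync mode
    if (res == NULL) break;
    TAOS_ROW row;
    int num_fields = taos_num_fields(res);
    TAOS_FIELD *fields = taos_fetch_fields(res);
    while ((row = taos_fetch_row(res)) != NULL) {
      char line[256];
      taos_print_row(line, row, fields, num_fields);
      puts(line);                       // print each newly arrived row
    }
    // As in the sample above, the result set returned by taos_consume is
    // not freed explicitly; it is managed by the subscription.
  }

  taos_unsubscribe(tsub, 1); // keep progress so a restart resumes from here
  taos_close(conn);
  return 0;
}
```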
- -Now let's see the effect of the above sample code, assuming below prerequisites have been done. - -- The sample code has been downloaded to local system 示 -- TDengine has been installed and launched properly on same system -- The database, STable, sub tables required in the sample code have been ready - -It's ready to launch below command in the directory where the sample code resides to compile and start the program. - -```bash -make -./subscribe -sql='select * from meters where current > 10;' -``` - -After the program is started, open another terminal and launch TDengine CLI `taos`, then use below SQL commands to insert a row whose current is 12A into table **D1001**. - -```sql -use test; -insert into D1001 values(now, 12, 220, 1); -``` - -Then, this row of data will be shown by the example program on the first terminal because its current exceeds 10A. More data can be inserted for you to observe the output of the example program. - -## Examples - -Below example program demonstrates how to subscribe the data rows whose current exceeds 10A using connectors. - -### Prepare Data - -```bash -# create database "power" -taos> create database power; -# use "power" as the database in following operations -taos> use power; -# create super table "meters" -taos> create table meters(ts timestamp, current float, voltage int, phase int) tags(location binary(64), groupId int); -# create tabes using the schema defined by super table "meters" -taos> create table d1001 using meters tags ("Beijing.Chaoyang", 2); -taos> create table d1002 using meters tags ("Beijing.Haidian", 2); -# insert some rows -taos> insert into d1001 values("2020-08-15 12:00:00.000", 12, 220, 1),("2020-08-15 12:10:00.000", 12.3, 220, 2),("2020-08-15 12:20:00.000", 12.2, 220, 1); -taos> insert into d1002 values("2020-08-15 12:00:00.000", 9.9, 220, 1),("2020-08-15 12:10:00.000", 10.3, 220, 1),("2020-08-15 12:20:00.000", 11.2, 220, 1); -# filter out the rows in which current is bigger than 10A -taos> select * from meters where current > 10; - ts | current | voltage | phase | location | groupid | -=========================================================================================================== - 2020-08-15 12:10:00.000 | 10.30000 | 220 | 1 | Beijing.Haidian | 2 | - 2020-08-15 12:20:00.000 | 11.20000 | 220 | 1 | Beijing.Haidian | 2 | - 2020-08-15 12:00:00.000 | 12.00000 | 220 | 1 | Beijing.Chaoyang | 2 | - 2020-08-15 12:10:00.000 | 12.30000 | 220 | 2 | Beijing.Chaoyang | 2 | - 2020-08-15 12:20:00.000 | 12.20000 | 220 | 1 | Beijing.Chaoyang | 2 | -Query OK, 5 row(s) in set (0.004896s) -``` - -### Example Programs - - - - - - - - - {/* - - */} - - - - {/* - - - - - */} - - - - - -### Run the Examples - -The example programs firstly consume all historical data matching the criteria. - -```bash -ts: 1597464000000 current: 12.0 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid : 2 -ts: 1597464600000 current: 12.3 voltage: 220 phase: 2 location: Beijing.Chaoyang groupid : 2 -ts: 1597465200000 current: 12.2 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid : 2 -ts: 1597464600000 current: 10.3 voltage: 220 phase: 1 location: Beijing.Haidian groupid : 2 -ts: 1597465200000 current: 11.2 voltage: 220 phase: 1 location: Beijing.Haidian groupid : 2 -``` - -Next, use TDengine CLI to insert a new row. - -``` -# taos -taos> use power; -taos> insert into d1001 values(now, 12.4, 220, 1); -``` - -Because the current in inserted row exceeds 10A, it will be consumed by the example program. 
- -``` -ts: 1651146662805 current: 12.4 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid: 2 -``` diff --git a/docs-en/04-develop/07-cache.md b/docs-en/04-develop/07-cache.md deleted file mode 100644 index 13db6c363802abed290cfc4d4466d40e48852f3d..0000000000000000000000000000000000000000 --- a/docs-en/04-develop/07-cache.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -sidebar_label: Cache -title: Cache -description: "The latest row of each table is kept in cache to provide high performance query of latest state." ---- - -The cache management policy in TDengine is First-In-First-Out (FIFO), which is also known as insert driven cache management policy and different from read driven cache management, i.e. Least-Recent-Used (LRU). It simply stores the latest data in cache and flushes the oldest data in cache to disk when the cache usage reaches a threshold. In IoT use cases, the most cared about data is the latest data, i.e. current state. The cache policy in TDengine is based the nature of IoT data. - -Caching the latest data provides the capability of retrieving data in milliseconds. With this capability, TDengine can be configured properly to be used as caching system without deploying another separate caching system to simplify the system architecture and minimize the operation cost. The cache will be emptied after TDengine is restarted, TDengine doesn't reload data from disk into cache like a real key-value caching system. - -The memory space used by TDengine cache is fixed in size, according to the configuration based on application requirement and system resources. Independent memory pool is allocated for and managed by each vnode (virtual node) in TDengine, there is no sharing of memory pools between vnodes. All the tables belonging to a vnode share all the cache memory of the vnode. - -Memory pool is divided into blocks and data is stored in row format in memory and each block follows FIFO policy. The size of each block is determined by configuration parameter `cache`, the number of blocks for each vnode is determined by `blocks`. For each vnode, the total cache size is `cache * blocks`. It's better to set the size of each block to hold at least tends of rows. - -`last_row` function can be used to retrieve the last row of a table or a STable to quickly show the current state of devices on monitoring screen. For example below SQL statement retrieves the latest voltage of all meters in Chaoyang district of Beijing. - -```sql -select last_row(voltage) from meters where location='Beijing.Chaoyang'; -``` diff --git a/docs-en/04-develop/08-udf.md b/docs-en/04-develop/08-udf.md deleted file mode 100644 index e344e4024ca629607ff6c1a7be13186d548838c5..0000000000000000000000000000000000000000 --- a/docs-en/04-develop/08-udf.md +++ /dev/null @@ -1,218 +0,0 @@ ---- -sidebar_label: UDF -title: User Defined Functions -description: "Scalar functions and aggregate functions developed by users can be utilized by the query framework to expand the query capability" ---- - -In some use cases, the query capability required by application programs can't be achieved directly by builtin functions. With UDF, the functions developed by users can be utilized by query framework to meet some special requirements. UDF normally takes one column of data as input, but can also support the result of sub query as input. - -From version 2.2.0.0, UDF programmed in C/C++ language can be supported by TDengine. - -Two kinds of functions can be implemented by UDF: scalar function and aggregate function. 
- -## Define UDF - -### Scalar Function - -Below function template can be used to define your own scalar function. - -`void udfNormalFunc(char* data, short itype, short ibytes, int numOfRows, long long* ts, char* dataOutput, char* interBuf, char* tsOutput, int* numOfOutput, short otype, short obytes, SUdfInit* buf)` - -`udfNormalFunc` is the place holder of function name, a function implemented based on the above template can be used to perform scalar computation on data rows. The parameters are fixed to control the data exchange between UDF and TDengine. - -- Defintions of the parameters: - - - data:input data - - itype:the type of input data, for details please refer to [type definition in column_meta](/reference/rest-api/), for example 4 represents INT - - iBytes:the number of bytes consumed by each value in the input data - - oType:the type of output data, similar to iType - - oBytes:the number of bytes consumed by each value in the output data - - numOfRows:the number of rows in the input data - - ts: the column of timestamp corresponding to the input data - - dataOutput:the buffer for output data, total size is `oBytes * numberOfRows` - - interBuf:the buffer for intermediate result, its size is specified by `BUFSIZE` parameter when creating a UDF. It's normally used when the intermediate result is not same as the final result, it's allocated and freed by TDengine. - - tsOutput:the column of timestamps corresponding to the output data; it can be used to output timestamp together with the output data if it's not NULL - - numOfOutput:the number of rows in output data - - buf:for the state exchange between UDF and TDengine - - [add_one.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/add_one.c) is one example of the simplest UDF implementations, i.e. one instance of the above `udfNormalFunc` template. It adds one to each value of a column passed in which can be filtered using `where` clause and outputs the result. - -### Aggregate Function - -Below function template can be used to define your own aggregate function. - -`void abs_max_merge(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf)` - -`udfMergeFunc` is the place holder of function name, the function implemented with the above template is used to aggregate the intermediate result, only can be used in the aggregate query for STable. - -Definitions of the parameters: - -- data:array of output data, if interBuf is used it's an array of interBuf -- numOfRows:number of rows in `data` -- dataOutput:the buffer for output data, the size is same as that of the final result; If the result is not final, it can be put in the interBuf, i.e. `data`. -- numOfOutput:number of rows in the output data -- buf:for the state exchange between UDF and TDengine - -[abs_max.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/abs_max.c) is an user defined aggregate function to get the maximum from the absolute value of a column. - -The internal processing is that the data affected by the select statement will be divided into multiple row blocks and `udfNormalFunc`, i.e. `abs_max` in this case, is performed on each row block to generate the intermediate of each sub table, then `udfMergeFunc`, i.e. `abs_max_merge` in this case, is performed on the intermediate result of sub tables to aggregate to generate the final or intermediate result of STable. The intermediate result of STable is finally processed by `udfFinalizeFunc` to generate the final result, which contain either 0 or 1 row. 
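Returning to the scalar template above for concreteness, here is a minimal sketch in the spirit of add_one.c that adds one to every INT value in the input column. The SUdfInit field layout shown is an assumption based on the sample files, included only to make the sketch self-contained; it is not a definitive header.

```c
// Minimal scalar UDF sketch following the udfNormalFunc template above.
// The SUdfInit layout is an assumption based on the sample files.
typedef struct SUdfInit {
  int maybe_null;   /* 1 if the function may return NULL   */
  int decimals;     /* for real-valued functions           */
  long long length; /* for string functions                */
  char *ptr;        /* free pointer for per-function state */
  int const_item;   /* 1 if the result is constant         */
} SUdfInit;

void add_one(char* data, short itype, short ibytes, int numOfRows,
             long long* ts, char* dataOutput, char* interBuf, char* tsOutput,
             int* numOfOutput, short otype, short obytes, SUdfInit* buf) {
  (void)ibytes; (void)ts; (void)interBuf; (void)tsOutput;
  (void)otype;  (void)obytes; (void)buf;
  if (itype == 4) {              /* 4 represents INT in column_meta codes */
    int *in  = (int *)data;
    int *out = (int *)dataOutput;
    for (int i = 0; i < numOfRows; ++i) {
      out[i] = in[i] + 1;        /* the actual scalar computation */
    }
    *numOfOutput = numOfRows;    /* one output row per input row  */
  } else {
    *numOfOutput = 0;            /* unsupported input type        */
  }
}
```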
- -Other typical scenarios, like covariance, can also be achieved by aggregate UDF. - -### Finalize - -Below function template can be used to finalize the result of your own UDF, normally used when interBuf is used. - -`void abs_max_finalize(char* dataOutput, char* interBuf, int* numOfOutput, SUdfInit* buf)` - -`udfFinalizeFunc` is the place holder of function name, definitions of the parameter are as below: - -- dataOutput:buffer for output data -- interBuf:buffer for intermediate result, can be used as input for next processing step -- numOfOutput:number of output data, can only be 0 or 1 for aggregate function -- buf:for state exchange between UDF and TDengine - -## UDF Conventions - -The naming of 3 kinds of UDF, i.e. udfNormalFunc, udfMergeFunc, and udfFinalizeFunc is required to have same prefix, i.e. the actual name of udfNormalFunc, which means udfNormalFunc doesn't need a suffix following the function name. While udfMergeFunc should be udfNormalFunc followed by `_merge`, udfFinalizeFunc should be udfNormalFunc followed by `_finalize`. The naming convention is part of UDF framework, TDengine follows this convention to invoke corresponding actual functions.\ - -According to the kind of UDF to implement, the functions that need to be implemented are different. - -- Scalar function:udfNormalFunc is required -- Aggregate function:udfNormalFunc, udfMergeFunc (if query on STable) and udfFinalizeFunc are required - -To be more accurate, assuming we want to implement a UDF named "foo". If the function is a scalar function, what we really need to implement is `foo`; if the function is aggregate function, we need to implement `foo`, `foo_merge`, and `foo_finalize`. For aggregate UDF, even though one of the three functions is not necessary, there must be an empty implementation. - -## Compile UDF - -The source code of UDF in C can't be utilized by TDengine directly. UDF can only be loaded into TDengine after compiling to dynamically linked library. - -For example, the example UDF `add_one.c` mentioned in previous sections need to be compiled into DLL using below command on Linux Shell. - -```bash -gcc -g -O0 -fPIC -shared add_one.c -o add_one.so -``` - -The generated DLL file `dd_one.so` can be used later when creating UDF. It's recommended to use GCC not older than 7.5. - -## Create and Use UDF - -### Create UDF - -SQL command can be executed on the same hos where the generated UDF DLL resides to load the UDF DLL into TDengine, this operation can't be done through REST interface or web console. Once created, all the clients of the current TDengine can use these UDF functions in their SQL commands. UDF are stored in the management node of TDengine. The UDFs loaded in TDengine would be still available after TDengine is restarted. - -When creating UDF, it needs to be clarified as either scalar function or aggregate function. If the specified type is wrong, the SQL statements using the function would fail with error. Besides, the input type and output type don't need to be same in UDF, but the input data type and output data type need to be consistent with the UDF definition. 
- -- Create Scalar Function - -```sql -CREATE FUNCTION ids(X) AS ids(Y) OUTPUTTYPE typename(Z) [ BUFSIZE B ]; -``` - -- ids(X):the function name to be sued in SQL statement, must be consistent with the function name defined by `udfNormalFunc` -- ids(Y):the absolute path of the DLL file including the implementation of the UDF, the path needs to be quoted by single or double quotes -- typename(Z):the output data type, the value is the literal string of the type -- B:the size of intermediate buffer, in bytes; it's an optional parameter and the range is [0,512] - -For example, below SQL statement can be used to create a UDF from `add_one.so`. - -```sql -CREATE FUNCTION add_one AS "/home/taos/udf_example/add_one.so" OUTPUTTYPE INT; -``` - -- Create Aggregate Function - -```sql -CREATE AGGREGATE FUNCTION ids(X) AS ids(Y) OUTPUTTYPE typename(Z) [ BUFSIZE B ]; -``` - -- ids(X):the function name to be sued in SQL statement, must be consistent with the function name defined by `udfNormalFunc` -- ids(Y):the absolute path of the DLL file including the implementation of the UDF, the path needs to be quoted by single or double quotes -- typename(Z):the output data type, the value is the literal string of the type 此 -- B:the size of intermediate buffer, in bytes; it's an optional parameter and the range is [0,512] - -For details about how to use intermediate result, please refer to example program [demo.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/demo.c). - -For example, below SQL statement can be used to create a UDF rom `demo.so`. - -```sql -CREATE AGGREGATE FUNCTION demo AS "/home/taos/udf_example/demo.so" OUTPUTTYPE DOUBLE bufsize 14; -``` - -### Manage UDF - -- Delete UDF - -``` -DROP FUNCTION ids(X); -``` - -- ids(X):same as that in `CREATE FUNCTION` statement - -```sql -DROP FUNCTION add_one; -``` - -- Show Available UDF - -```sql -SHOW FUNCTIONS; -``` - -### Use UDF - -The function name specified when creating UDF can be used directly in SQL statements, just like builtin functions. - -```sql -SELECT X(c) FROM table/STable; -``` - -The above SQL statement invokes function X for column c. - -## Restrictions for UDF - -In current version there are some restrictions for UDF - -1. Only Linux is supported when creating and invoking UDF for both client side and server side -2. UDF can't be mixed with builtin functions -3. Only one UDF can be used in a SQL statement -4. Single column is supported as input for UDF -5. Once created successfully, UDF is persisted in MNode of TDengineUDF -6. UDF can't be created through REST interface -7. The function name used when creating UDF in SQL must be consistent with the function name defined in the DLL, i.e. the name defined by `udfNormalFunc` -8. The name name of UDF name should not conflict with any of builtin functions - -## Examples - -### Scalar function example [add_one](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/add_one.c) - -
-<details> -<summary>add_one.c</summary> - -```c -{{#include tests/script/sh/add_one.c}} -``` - -</details> - -### Aggregate function example [abs_max](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/abs_max.c) - -<details> -<summary>abs_max.c</summary> - -```c -{{#include tests/script/sh/abs_max.c}} -``` - -</details> - -### Example for using intermediate result [demo](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/demo.c) - -<details> -<summary>demo.c</summary> - -```c -{{#include tests/script/sh/demo.c}} -``` - -</details>
diff --git a/docs-en/04-develop/_category_.yml b/docs-en/04-develop/_category_.yml deleted file mode 100644 index 9267c5657c39f1915b89b67a5884bd66e0bd9ed4..0000000000000000000000000000000000000000 --- a/docs-en/04-develop/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: Develop \ No newline at end of file diff --git a/docs-en/04-develop/index.md b/docs-en/04-develop/index.md deleted file mode 100644 index 84ef59a09a18d58288e71be8af78f97f9856759f..0000000000000000000000000000000000000000 --- a/docs-en/04-develop/index.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: Develop ---- - -To develop an application, if you are going to use TDengine as the tool to process time-series data, you shall take the following steps: - -1. Choose the way for connection to TDengine. No matter what programming language you use, you can always use the REST interface to access TDengine, but you can also use connectors unique to each programming language. -2. Design the data model based on your own application scenarios. According to the data characteristics, you can decide to create one or more databases; learn about static labels and collected metrics, create the STable with the right schema, and create the subtables. -3. Decide how to insert data. TDengine supports writing using standard SQL, but also supports schemaless writing, so that data can be written directly without creating tables manually. -4. Based on business requirements, find out what SQL query statements need to be written. -5. If you want to run real-time analysis based on time series data, including various dashboards, it is recommended that you use the TDengine continuous query feature instead of deploying complex streaming processing systems such as Spark or Flink. -6. If your application has modules that need to consume inserted data, and they need to be notified when new data is inserted, it is recommended that you use the data subscription function provided by TDengine without the need to deploy Kafka. -7. In many scenarios (such as fleet management), the application needs to obtain the latest status of each data collection point. It is recommended that you use the cache function of TDengine instead of deploying Redis separately. -8. If you find that the SQL functions of TDengine cannot meet your requirements, then you can use user-defined functions to solve the problem. - -This section is organized in the order described above. For ease of understanding, TDengine provides sample code for each supported programming language for each function. If you want to learn more about the use of SQL, you need to check the [SQL manual](/taos-sql/). For a more in-depth understanding of the use of each connector, please read the [Connector Reference Guide](/reference/connector/). If you also want to integrate TDengine with third-party systems, such as Grafana, please refer to the [third-party tools](/third-party/). - -If you encounter any problems during the development process, please click ["Submit an issue"](https://github.com/taosdata/TDengine/issues/new/choose) at the bottom of each page and submit it on GitHub right away.
- -```mdx-code-block -import DocCardList from '@theme/DocCardList'; -import {useCurrentSidebarCategory} from '@docusaurus/theme-common'; - - -``` diff --git a/docs-en/03-get-started/_apt_get_install.mdx b/docs-en/05-get-started/_apt_get_install.mdx similarity index 100% rename from docs-en/03-get-started/_apt_get_install.mdx rename to docs-en/05-get-started/_apt_get_install.mdx diff --git a/docs-en/03-get-started/_category_.yml b/docs-en/05-get-started/_category_.yml similarity index 100% rename from docs-en/03-get-started/_category_.yml rename to docs-en/05-get-started/_category_.yml diff --git a/docs-en/05-get-started/_pkg_install.mdx b/docs-en/05-get-started/_pkg_install.mdx new file mode 100644 index 0000000000000000000000000000000000000000..cf10497c96ba1d777e45340b0312d97c127b6fcb --- /dev/null +++ b/docs-en/05-get-started/_pkg_install.mdx @@ -0,0 +1,17 @@ +import PkgList from "/components/PkgList"; + +It's very easy to install TDengine: it takes only a few minutes from downloading the package to finishing the installation. + +For the convenience of users, from version 2.4.0.10, the standard server side installation package includes `taos`, `taosd`, `taosAdapter`, `taosBenchmark` and sample code. If only the `taosd` server and C/C++ connector are required, you can also choose to download the lite package. + +Three kinds of packages are provided: tar.gz, rpm and deb. In particular, the tar.gz package is provided for the convenience of enterprise customers on different kinds of operating systems; it includes `taosdump` and the TDinsight installation script, which are normally only provided in the taos-tools rpm and deb packages. + +Between two major release versions, some beta versions may be delivered for users to try some new features. + + + +For the details please refer to [Install and Uninstall](/operation/pkg-install). + +To see the details of versions, please refer to [Download List](https://tdengine.com/all-downloads) and [Release Notes](https://github.com/taosdata/TDengine/releases). + + diff --git a/docs-en/03-get-started/03-get-started.md b/docs-en/05-get-started/index.md similarity index 93% rename from docs-en/03-get-started/03-get-started.md rename to docs-en/05-get-started/index.md index 6cd7a53a914662876bb41d1697c7299dc56f21c6..56958ef3ec1c206ee0cff45c67fd3c3a6fa6753a 100644 --- a/docs-en/03-get-started/03-get-started.md +++ b/docs-en/05-get-started/index.md @@ -10,7 +10,7 @@ import AptGetInstall from "./\_apt_get_install.mdx"; ## Quick Install -The full package of TDengine includes the server(taosd), taosAdapter for connecting with third-party systems and providing a RESTful interface, client driver(taosc), command-line program(CLI, taos) and some tools. For the current version, the server taosd and taosAdapter can only be installed and run on Linux systems. In the future taosd and taosAdapter will also be supported on Windows, macOS and other systems. The client driver taosc and TDengine CLI can be installed and run on Windows or Linux. In addition to the [RESTful interface](/reference/taosadapter), TDengine also provides connectors for a number of programming languages. In versions before 2.4, there is no taosAdapter, and the RESTful interface is provided by the built-in HTTP service of taosd. +The full package of TDengine includes the server(taosd), taosAdapter for connecting with third-party systems and providing a RESTful interface, client driver(taosc), command-line program(CLI, taos) and some tools.
For the current version, the server taosd and taosAdapter can only be installed and run on Linux systems. In the future taosd and taosAdapter will also be supported on Windows, macOS and other systems. The client driver taosc and TDengine CLI can be installed and run on Windows or Linux. In addition to connectors for multiple languages, TDengine also provides a [RESTful interface](/reference/rest-api) through [taosAdapter](/reference/taosadapter). Prior to version 2.4.0.0, taosAdapter did not exist and the RESTful interface was provided by the built-in HTTP service of taosd. TDengine supports X64/ARM64/MIPS64/Alpha64 hardware platforms, and will support ARM32, RISC-V and other CPU architectures in the future. @@ -130,7 +130,7 @@ After TDengine server is running,execute `taosBenchmark` (previously named tao taosBenchmark ``` -This command will create a super table "meters" under database "test". Under "meters", 10000 tables are created with names from "d0" to "d9999". Each table has 10000 rows and each row has four columns (ts, current, voltage, phase). Time stamp is starting from "2017-07-14 10:40:00 000" to "2017-07-14 10:40:09 999". Each table has tags "location" and "groupId". groupId is set 1 to 10 randomly, and location is set to "beijing" or "shanghai". +This command will create a super table "meters" under database "test". Under "meters", 10000 tables are created with names from "d0" to "d9999". Each table has 10000 rows and each row has four columns (ts, current, voltage, phase). Timestamps start from "2017-07-14 10:40:00 000" and run to "2017-07-14 10:40:09 999". Each table has tags "location" and "groupId". groupId is set randomly from 1 to 10, and location is set to "California.SanFrancisco" or "California.SanDiego". This command will insert 100 million rows into the database quickly. Time to insert depends on the hardware configuration; it only takes a dozen seconds for a regular PC server.
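To quickly verify that the benchmark data has been inserted, a simple count query can be run in the TDengine CLI (a sketch; the expected result is 100 million rows if the default taosBenchmark settings were used):

```sql
select count(*) from test.meters;
```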
@@ -152,10 +152,10 @@ query the average, maximum, minimum of 100 million rows: taos> select avg(current), max(voltage), min(phase) from test.meters; ``` -query the total number of rows with location="beijing": +query the total number of rows with location="California.SanFrancisco": ```sql -taos> select count(*) from test.meters where location="beijing"; +taos> select count(*) from test.meters where location="California.SanFrancisco"; ``` query the average, maximum, minimum of all rows with groupId=10: diff --git a/docs-en/04-develop/01-connect/_category_.yml b/docs-en/07-develop/01-connect/_category_.yml similarity index 100% rename from docs-en/04-develop/01-connect/_category_.yml rename to docs-en/07-develop/01-connect/_category_.yml diff --git a/docs-en/04-develop/01-connect/_connect_c.mdx b/docs-en/07-develop/01-connect/_connect_c.mdx similarity index 100% rename from docs-en/04-develop/01-connect/_connect_c.mdx rename to docs-en/07-develop/01-connect/_connect_c.mdx diff --git a/docs-en/04-develop/01-connect/_connect_cs.mdx b/docs-en/07-develop/01-connect/_connect_cs.mdx similarity index 100% rename from docs-en/04-develop/01-connect/_connect_cs.mdx rename to docs-en/07-develop/01-connect/_connect_cs.mdx diff --git a/docs-en/04-develop/01-connect/_connect_go.mdx b/docs-en/07-develop/01-connect/_connect_go.mdx similarity index 100% rename from docs-en/04-develop/01-connect/_connect_go.mdx rename to docs-en/07-develop/01-connect/_connect_go.mdx diff --git a/docs-en/04-develop/01-connect/_connect_java.mdx b/docs-en/07-develop/01-connect/_connect_java.mdx similarity index 100% rename from docs-en/04-develop/01-connect/_connect_java.mdx rename to docs-en/07-develop/01-connect/_connect_java.mdx diff --git a/docs-en/04-develop/01-connect/_connect_node.mdx b/docs-en/07-develop/01-connect/_connect_node.mdx similarity index 100% rename from docs-en/04-develop/01-connect/_connect_node.mdx rename to docs-en/07-develop/01-connect/_connect_node.mdx diff --git a/docs-en/07-develop/01-connect/_connect_python.mdx b/docs-en/07-develop/01-connect/_connect_python.mdx new file mode 100644 index 0000000000000000000000000000000000000000..44b7586fadbf618231fce7753d3b4b68853a7f57 --- /dev/null +++ b/docs-en/07-develop/01-connect/_connect_python.mdx @@ -0,0 +1,3 @@ +```python title="Native Connection" +{{#include docs-examples/python/connect_example.py}} +``` diff --git a/docs-en/04-develop/01-connect/_connect_r.mdx b/docs-en/07-develop/01-connect/_connect_r.mdx similarity index 100% rename from docs-en/04-develop/01-connect/_connect_r.mdx rename to docs-en/07-develop/01-connect/_connect_r.mdx diff --git a/docs-en/04-develop/01-connect/_connect_rust.mdx b/docs-en/07-develop/01-connect/_connect_rust.mdx similarity index 100% rename from docs-en/04-develop/01-connect/_connect_rust.mdx rename to docs-en/07-develop/01-connect/_connect_rust.mdx diff --git a/docs-en/04-develop/01-connect/index.md b/docs-en/07-develop/01-connect/index.md similarity index 76% rename from docs-en/04-develop/01-connect/index.md rename to docs-en/07-develop/01-connect/index.md index ecb8caa308da147adc191b98af9df81c7af1eb0b..b9217b828d0d08c4ff1eacd27406d4e3bfba8eac 100644 --- a/docs-en/04-develop/01-connect/index.md +++ b/docs-en/07-develop/01-connect/index.md @@ -1,7 +1,7 @@ --- -sidebar_label: Connection -title: Connect to TDengine -description: "This document explains how to establish connection to TDengine, and briefly introduce how to install and use TDengine connectors." 
+sidebar_label: Connect +title: Connect +description: "This document explains how to establish connections to TDengine, and briefly introduces how to install and use TDengine connectors." --- import Tabs from "@theme/Tabs"; @@ -19,25 +19,24 @@ import InstallOnLinux from "../../14-reference/03-connector/\_windows_install.md import VerifyLinux from "../../14-reference/03-connector/\_verify_linux.mdx"; import VerifyWindows from "../../14-reference/03-connector/\_verify_windows.mdx"; -Any application programs running on any kind of platforms can access TDengine through the REST API provided by TDengine. For the details, please refer to [REST API](/reference/rest-api/). Besides, application programs can use the connectors of multiple programming languages to access TDengine, including C/C++, Java, Python, Go, Node.js, C#, and Rust. This chapter describes how to establish connection to TDengine and briefly introduce how to install and use connectors. For details about the connectors, please refer to [Connectors](/reference/connector/) +Any application programs running on any kind of platform can access TDengine through the REST API provided by TDengine. For details, please refer to [REST API](/reference/rest-api/). Additionally, application programs can use the connectors of multiple programming languages including C/C++, Java, Python, Go, Node.js, C#, and Rust to access TDengine. This chapter describes how to establish a connection to TDengine and briefly introduces how to install and use connectors. For details about the connectors, please refer to [Connectors](/reference/connector/) ## Establish Connection There are two ways for a connector to establish connections to TDengine: -1. Connection through the REST API provided by taosAdapter component, this way is called "REST connection" hereinafter. +1. Connection through the REST API provided by the taosAdapter component; this way is called "REST connection" hereinafter. 2. Connection through the TDengine client driver (taosc); this way is called "Native connection" hereinafter. -Either way, same or similar APIs are provided by connectors to access database or execute SQL statements, no obvious difference can be observed. - Key differences: -1. With REST connection, it's not necessary to install TDengine client driver (taosc), it's more friendly for cross-platform with the cost of 30% performance downgrade. When taosc has an upgrade, application does not need to make changes. -2. With native connection, full compatibility of TDengine can be utilized, like [Parameter Binding](/reference/connector/cpp#Parameter Binding-api), [Subscription](reference/connector/cpp#Subscription), etc. But taosc has to be installed, some platforms may not be supported. +1. The TDengine client driver (taosc) has the highest performance with all the features of TDengine like [Parameter Binding](/reference/connector/cpp#parameter-binding-api), [Subscription](/reference/connector/cpp#subscription-and-consumption-api), etc. +2. The TDengine client driver (taosc) is not supported across all platforms, and applications built on taosc may need to be modified when updating taosc to newer versions. +3. The REST connection is more accessible with cross-platform support; however, it results in a 30% performance downgrade. ## Install Client Driver taosc -If choosing to use native connection and the application is not on the same host as TDengine server, TDengine client driver taosc needs to be installed on the host where the application is.
If choosing to use REST connection or the application is on the same host as server side, this step can be skipped. It's better to use same version of taosc as the server. +If you are choosing to use the native connection and the application is not on the same host as TDengine server, the TDengine client driver taosc needs to be installed on the application host. If choosing to use the REST connection or the application is on the same host as TDengine server, this step can be skipped. It's better to use the same version of taosc as the TDengine server. ### Install diff --git a/docs-en/04-develop/02-model/_category_.yml b/docs-en/07-develop/02-model/_category_.yml similarity index 100% rename from docs-en/04-develop/02-model/_category_.yml rename to docs-en/07-develop/02-model/_category_.yml diff --git a/docs-en/07-develop/02-model/index.mdx b/docs-en/07-develop/02-model/index.mdx new file mode 100644 index 0000000000000000000000000000000000000000..86853aaaa3f7285fe042a892e2ec903d57894111 --- /dev/null +++ b/docs-en/07-develop/02-model/index.mdx @@ -0,0 +1,93 @@ +--- +title: Data Model +--- + +The data model employed by TDengine is similar to that of a relational database. You have to create databases and tables. You must design the data model based on your own business and application requirements. You should design the STable (an abbreviation for super table) schema to fit your data. This chapter will explain the big picture without getting into syntactical details. + +## Create Database + +The [characteristics of time-series data](https://www.taosdata.com/blog/2019/07/09/86.html) from different data collection points may be different. Characteristics include collection frequency, retention policy and others which determine how you create and configure the database. For example, the days to keep data, the number of replicas, the data block size, whether data updates are allowed and other configurable parameters are determined by the characteristics of your data and your business requirements. For TDengine to operate with the best performance, we strongly recommend that you create and configure different databases for data with different characteristics. This allows you, for example, to set up different storage and retention policies. When creating a database, there are a lot of parameters that can be configured such as the days to keep data, the number of replicas, the number of memory blocks, time precision, the minimum and maximum number of rows in each data block, whether compression is enabled, the time range of the data in single data file and so on. Below is an example of the SQL statement to create a database. + +```sql +CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 6 UPDATE 1; +``` + +In the above SQL statement: +- a database named "power" will be created +- the data in it will be kept for 365 days, which means that data older than 365 days will be deleted automatically +- a new data file will be created every 10 days +- the number of memory blocks is 6 +- data is allowed to be updated + +For more details please refer to [Database](/taos-sql/database). + +After creating a database, the current database in use can be switched using SQL command `USE`. For example, the SQL statement below switches the current database to `power`. If no current database is specified, a table name must be preceded by the corresponding database name. + +```sql +USE power; +``` + +:::note + +- Any table or STable must belong to a database. To create a table or STable, the database it belongs to must be ready.
+- JOIN operations can't be performed on tables from two different databases. +- Timestamp needs to be specified when inserting rows or querying historical rows. + +::: + +## Create STable + +In a time-series application, there may be multiple kinds of data collection points. For example, in the electrical power system there are meters, transformers, bus bars, switches, etc. For easy and efficient aggregation of multiple tables, one STable needs to be created for each kind of data collection point. For example, for the meters in [table 1](/tdinternal/arch#model_table1), the SQL statement below can be used to create the super table. + +```sql +CREATE STable meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int); +``` + +:::note +If you are using versions prior to 2.0.15, the `STable` keyword needs to be replaced with `TABLE`. + +::: + +Similar to creating a regular table, when creating a STable, the name and schema need to be provided. In the STable schema, the first column must always be a timestamp (like ts in the example), and the other columns (like current, voltage and phase in the example) are the data collected. The remaining columns can [contain data of type](/taos-sql/data-type/) integer, float, double, string etc. In addition, the schema for tags, like location and groupId in the example, must be provided. The tag type can be integer, float, string, etc. Tags are essentially the static properties of a data collection point. For example, properties like the location, device type, device group ID, manager ID are tags. Tags in the schema can be added, removed or updated. Please refer to [STable](/taos-sql/stable) for more details. + +For each kind of data collection point, a corresponding STable must be created. There may be many STables in an application. For the electrical power system, we need to create STables for meters, transformers, busbars and switches respectively. There may be multiple kinds of data collection points on a single device, for example there may be one data collection point for electrical data like current and voltage and another data collection point for environmental data like temperature, humidity and wind direction. Multiple STables are required for these kinds of devices. + +At most 4096 (or 1024 prior to version 2.1.7.0) columns are allowed in a STable. If there are more than 4096 metrics to be collected for a data collection point, multiple STables are required. There can be multiple databases in a system, while one or more STables can exist in a database. + +## Create Table + +A specific table needs to be created for each data collection point. Similar to RDBMS, table name and schema are required to create a table. Additionally, one or more tags can be created for each table. To create a table, a STable needs to be used as a template and the values need to be specified for the tags. For example, for the meters in [Table 1](/tdinternal/arch#model_table1), the table can be created using the SQL statement below. + +```sql +CREATE TABLE d1001 USING meters TAGS ("California.SanFrancisco", 2); +``` + +In the above SQL statement, "d1001" is the table name, "meters" is the STable name, followed by the value of tag "location" and the value of tag "groupId", which are "California.SanFrancisco" and "2" respectively in the example. The tag values can be updated after the table is created. Please refer to [Tables](/taos-sql/table) for details.
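For example, a tag value could later be changed with an `ALTER TABLE` statement; a minimal sketch (the new tag value here is illustrative):

```sql
-- Illustrative: move subtable d1001 to a different group.
ALTER TABLE d1001 SET TAG groupId = 3;
```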
+ +In the TDengine system, it's recommended to create a table for a data collection point via STable. A table created via STable is called a subtable in some parts of the TDengine documentation. All SQL commands applied on regular tables can be applied on subtables. + +:::warning +It's not recommended to create a table in a database while using a STable from another database as a template. + +::: + +:::tip +It's suggested to use the globally unique ID of a data collection point as the table name. For example, the device serial number could be used as a unique ID. If a unique ID doesn't exist, multiple IDs that are not globally unique can be combined to form a globally unique ID. It's not recommended to use a globally unique ID as tag value. + +::: + +## Create Table Automatically + +In some circumstances, it's unknown whether the table already exists when inserting rows. The table can be created automatically using the SQL statement below, and nothing will happen if the table already exists. + +```sql +INSERT INTO d1001 USING meters TAGS ("California.SanFrancisco", 2) VALUES (now, 10.2, 219, 0.32); +``` + +In the above SQL statement, a row with value `(now, 10.2, 219, 0.32)` will be inserted into table "d1001". If table "d1001" doesn't exist, it will be created automatically using STable "meters" as a template with tag values `"California.SanFrancisco", 2`. + +For more details please refer to [Create Table Automatically](/taos-sql/insert#automatically-create-table-when-inserting). + +## Single Column vs Multiple Column + +A multiple-column data model is supported in TDengine. As long as multiple metrics are collected by the same data collection point at the same time, i.e. the timestamps are identical, these metrics can be put in a single STable as columns. + +However, there is another kind of design, i.e. a single-column data model in which a table is created for each metric. This means that a STable is required for each kind of metric. For example in a single column model, 3 STables would be required for current, voltage and phase. + +It's recommended to use a multiple-column data model as much as possible because insert and query performance is higher. In some cases, however, the collected metrics may vary frequently and so the corresponding STable schema needs to be changed frequently too. In such cases, it's more convenient to use a single-column data model. diff --git a/docs-en/04-develop/03-insert-data/01-sql-writing.mdx b/docs-en/07-develop/03-insert-data/01-sql-writing.mdx similarity index 63% rename from docs-en/04-develop/03-insert-data/01-sql-writing.mdx rename to docs-en/07-develop/03-insert-data/01-sql-writing.mdx index 9f66992d3de755389c3a0722ebb09097177742f1..397b1a14fd76c1372c79eb88575f2bf21cb62050 100644 --- a/docs-en/04-develop/03-insert-data/01-sql-writing.mdx +++ b/docs-en/07-develop/03-insert-data/01-sql-writing.mdx @@ -1,5 +1,5 @@ --- -sidebar_label: SQL +sidebar_label: Insert Using SQL title: Insert Using SQL --- @@ -22,11 +22,11 @@ import CStmt from "./_c_stmt.mdx"; ## Introduction -Application program can execute `INSERT` statement through connectors to insert rows. TAOS CLI can be launched manually to insert data too. +Application programs can execute `INSERT` statement through connectors to insert rows. The TAOS CLI can also be used to manually insert data. ### Insert Single Row -Below SQL statement is used to insert one row into table "d1001". +The below SQL statement is used to insert one row into table "d1001".
```sql INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31); @@ -34,7 +34,7 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31); ### Insert Multiple Rows -Multiple rows can be inserted in single SQL statement. Below example inserts 2 rows into table "d1001". +Multiple rows can be inserted in a single SQL statement. The example below inserts 2 rows into table "d1001". ```sql INSERT INTO d1001 VALUES (1538548684000, 10.2, 220, 0.23) (1538548696650, 10.3, 218, 0.25); @@ -42,7 +42,7 @@ INSERT INTO d1001 VALUES (1538548684000, 10.2, 220, 0.23) (1538548696650, 10.3, ### Insert into Multiple Tables -Data can be inserted into multiple tables in same SQL statement. Below example inserts 2 rows into table "d1001" and 1 row into table "d1002". +Data can be inserted into multiple tables in the same SQL statement. The example below inserts 2 rows into table "d1001" and 1 row into table "d1002". ```sql INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) d1002 VALUES (1538548696800, 12.3, 221, 0.31); @@ -52,14 +52,14 @@ For more details about `INSERT` please refer to [INSERT](/taos-sql/insert). :::info -- Inserting in batch can gain better performance. Normally, the higher the batch size, the better the performance. Please be noted each single row can't exceed 16K bytes and each single SQL statement can't exceed 1M bytes. -- Inserting with multiple threads can gain better performance too. However, depending on the system resources on the application side and the server side, with the number of inserting threads grows to a specific point, the performance may drop instead of growing. The proper number of threads need to be tested in a specific environment to find the best number. +- Inserting in batches can improve performance. Normally, the higher the batch size, the better the performance. Please note that a single row can't exceed 48K bytes and each SQL statement can't exceed 1MB. +- Inserting with multiple threads can also improve performance. However, depending on the system resources on the application side and the server side, when the number of inserting threads grows beyond a specific point the performance may drop instead of improving. The proper number of threads needs to be tested in a specific environment to find the best number. ::: :::warning -- If the timestamp for the row to be inserted already exists in the table, the behavior depends on the value of parameter `UPDATE`. If it's set to 0 (also the default value), the row will be discarded. If it's set to 1, the new values will override the old values for the same row. +- If the timestamp for the row to be inserted already exists in the table, the behavior depends on the value of parameter `UPDATE`. If it's set to 0 (the default value), the row will be discarded. If it's set to 1, the new values will override the old values for the same row. - The timestamp to be inserted must be newer than the timestamp of subtracting current time by the parameter `KEEP`. If `KEEP` is set to 3650 days, then the data older than 3650 days ago can't be inserted. The timestamp to be inserted can't be newer than the timestamp of current time plus parameter `DAYS`. If `DAYS` is set to 2, the data newer than 2 days later can't be inserted. ::: @@ -95,13 +95,13 @@ For more details about `INSERT` please refer to [INSERT](/taos-sql/insert). :::note 1. With either native connection or REST connection, the above samples can work well. -2. 
Please be noted that `use db` can't be used with REST connection because REST connection is stateless, so in the samples `dbName.tbName` is used to specify the table name. +2. Please note that `use db` can't be used with a REST connection because REST connections are stateless, so in the samples `dbName.tbName` is used to specify the table name. ::: ### Insert with Parameter Binding -TDengine also provides Prepare API that support parameter binding. Similar to MySQL, only `?` can be used in these APIs to represent the parameters to bind. From version 2.1.1.0 and 2.1.2.0, parameter binding support for inserting data has been improved significantly to improve the insert performance by avoiding the cost of parsing SQL statements. +TDengine also provides API support for parameter binding. Similar to MySQL, only `?` can be used in these APIs to represent the parameters to bind. From version 2.1.1.0 and 2.1.2.0, parameter binding support for inserting data has been improved significantly, increasing insert performance by avoiding the cost of parsing SQL statements. Parameter binding is available only with native connection. diff --git a/docs-en/04-develop/03-insert-data/02-influxdb-line.mdx b/docs-en/07-develop/03-insert-data/02-influxdb-line.mdx similarity index 75% rename from docs-en/04-develop/03-insert-data/02-influxdb-line.mdx rename to docs-en/07-develop/03-insert-data/02-influxdb-line.mdx index 172003d203fa309ce51b3ecae9a7490a59f513d7..be46ebf0c97a29b57c1b57eb8ea5c9394f85b93a 100644 --- a/docs-en/04-develop/03-insert-data/02-influxdb-line.mdx +++ b/docs-en/07-develop/03-insert-data/02-influxdb-line.mdx @@ -15,13 +15,13 @@ import CLine from "./_c_line.mdx"; ## Introduction -A single line of text is used in InfluxDB Line protocol format represents one row of data, each line contains 4 parts as shown below. +In the InfluxDB Line protocol format, a single line of text is used to represent one row of data. Each line contains 4 parts as shown below. ``` measurement,tag_set field_set timestamp ``` -- `measurement` will be used as the STable name +- `measurement` will be used as the name of the STable - `tag_set` will be used as tags, with format like `<tag_key>=<tag_value>,<tag_key2>=<tag_value2>` - `field_set` will be used as data columns, with format like `<field_key>=<field_value>,<field_key2>=<field_value2>` - `timestamp` is the primary key timestamp corresponding to this row of data @@ -29,13 +29,13 @@ measurement,tag_set field_set timestamp For example: ``` -meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500 +meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500 ``` :::note -- All the data in `tag_set` will be converted to ncahr type automatically . -Each data in `field_set` must be self-description for its data type. For example 1.2f32 means a value 1.2 of float type, it will be treated as double without the "f" type suffix. +- All the data in `tag_set` will be converted to nchar type automatically. +- Each data in `field_set` must be self-descriptive for its data type. For example 1.2f32 means a value 1.2 of float type. Without the "f" type suffix, it will be treated as type double. - Multiple kinds of precision can be used for the `timestamp` field. Time precision can be from nanosecond (ns) to hour (h).
::: diff --git a/docs-en/04-develop/03-insert-data/03-opentsdb-telnet.mdx b/docs-en/07-develop/03-insert-data/03-opentsdb-telnet.mdx similarity index 74% rename from docs-en/04-develop/03-insert-data/03-opentsdb-telnet.mdx rename to docs-en/07-develop/03-insert-data/03-opentsdb-telnet.mdx index 66bb67c25669b906183526377f60b969ea3d1e85..18a695cda8efbef075451ff53e542d9e69c58e0b 100644 --- a/docs-en/04-develop/03-insert-data/03-opentsdb-telnet.mdx +++ b/docs-en/07-develop/03-insert-data/03-opentsdb-telnet.mdx @@ -15,21 +15,21 @@ import CTelnet from "./_c_opts_telnet.mdx"; ## Introduction -A single line of text is used in OpenTSDB line protocol to represent one row of data. OpenTSDB employs single column data model, so one line can only contains single data column. There can be multiple tags. Each line contains 4 parts as below: +A single line of text is used in OpenTSDB line protocol to represent one row of data. OpenTSDB employs a single column data model, so each line can only contain a single data column. There can be multiple tags. Each line contains 4 parts as below: ``` <metric> <timestamp> <value> <tagk_1>=<tagv_1>[ <tagk_n>=<tagv_n>] ``` -- `metric` will be used as STable name. -- `timestamp` is the timestamp of current row of data. The time precision will be determined automatically based on the length of the timestamp. second and millisecond time precision are supported.\ -- `value` is a metric which must be a numeric value, the corresponding column name is "value". -- The last part is tag sets separated by space, all tags will be converted to nchar type automatically. +- `metric` will be used as the STable name. +- `timestamp` is the timestamp of the current row of data. The time precision will be determined automatically based on the length of the timestamp. Second and millisecond time precision are supported. - `value` is a metric which must be a numeric value, the corresponding column name is "value". +- The last part is the tag set separated by spaces, all tags will be converted to nchar type automatically. For example: ```txt -meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3 +meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3 ``` Please refer to [OpenTSDB Telnet API](http://opentsdb.net/docs/build/html/api_telnet/put.html) for more details. @@ -60,7 +60,7 @@ Please refer to [OpenTSDB Telnet API](http://opentsdb.net/docs/build/html/api_te -2 STables will be crated automatically while each STable has 4 rows of data in the above sample code. +2 STables will be created automatically and each STable has 4 rows of data in the above sample code. ```cmd taos> use test; Database changed. taos> show STables; name | created_time | columns | tags | tables | ============================================================================================ meters.current | 2022-03-28 09:56:48.578 | 2 | 2 | 2 | meters.voltage | 2022-03-28 09:56:48.588 | 2 | 2 | 2 | Query OK, 2 row(s) in set (0.002544s) taos> select tbname, * from `meters.current`; tbname | ts | value | groupid | location | ================================================================================================================================== - t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.249 | 10.800000000 | 3 | Beijing.Haidian | - t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.250 | 11.300000000 | 3 | Beijing.Haidian | - t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.249 | 10.300000000 | 2 | Beijing.Chaoyang | - t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.250 | 12.600000000 | 2 | Beijing.Chaoyang | + t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.249 | 10.800000000 | 3 | California.LosAngeles | + t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.250 | 11.300000000 | 3 | California.LosAngeles | + t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.249 | 10.300000000 | 2 | California.SanFrancisco | + t_7e7b26dd860280242c6492a16...
| 2022-03-28 09:56:51.250 | 12.600000000 | 2 | California.SanFrancisco | Query OK, 4 row(s) in set (0.005399s) ``` diff --git a/docs-en/04-develop/03-insert-data/04-opentsdb-json.mdx b/docs-en/07-develop/03-insert-data/04-opentsdb-json.mdx similarity index 92% rename from docs-en/04-develop/03-insert-data/04-opentsdb-json.mdx rename to docs-en/07-develop/03-insert-data/04-opentsdb-json.mdx index fb938d26961f86cefd3b5b9d31e4eb3481e10873..3a239440311c736159d6060db5e730c5e5665bcb 100644 --- a/docs-en/04-develop/03-insert-data/04-opentsdb-json.mdx +++ b/docs-en/07-develop/03-insert-data/04-opentsdb-json.mdx @@ -15,7 +15,7 @@ import CJson from "./_c_opts_json.mdx"; ## Introduction -A JSON string is used in OpenTSDB JSON to represent one or more rows of data, for exmaple: +A JSON string is used in OpenTSDB JSON to represent one or more rows of data, for example: ```json [ @@ -47,7 +47,7 @@ Please refer to [OpenTSDB HTTP API](http://opentsdb.net/docs/build/html/api_http :::note - In JSON protocol, strings will be converted to nchar type and numeric values will be converted to double type. -- Only data in array format is accepted, array must be used even there is only one row. +- Only data in array format is accepted and so an array must be used even if there is only one row. ::: @@ -93,7 +93,7 @@ Query OK, 2 row(s) in set (0.001954s) taos> select * from `meters.current`; ts | value | groupid | location | =================================================================================================================== - 2022-03-28 09:56:51.249 | 10.300000000 | 2.000000000 | Beijing.Chaoyang | - 2022-03-28 09:56:51.250 | 12.600000000 | 2.000000000 | Beijing.Chaoyang | + 2022-03-28 09:56:51.249 | 10.300000000 | 2.000000000 | California.SanFrancisco | + 2022-03-28 09:56:51.250 | 12.600000000 | 2.000000000 | California.SanFrancisco | Query OK, 2 row(s) in set (0.004076s) ``` diff --git a/docs-en/04-develop/03-insert-data/_c_line.mdx b/docs-en/07-develop/03-insert-data/_c_line.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_c_line.mdx rename to docs-en/07-develop/03-insert-data/_c_line.mdx diff --git a/docs-en/04-develop/03-insert-data/_c_opts_json.mdx b/docs-en/07-develop/03-insert-data/_c_opts_json.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_c_opts_json.mdx rename to docs-en/07-develop/03-insert-data/_c_opts_json.mdx diff --git a/docs-en/04-develop/03-insert-data/_c_opts_telnet.mdx b/docs-en/07-develop/03-insert-data/_c_opts_telnet.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_c_opts_telnet.mdx rename to docs-en/07-develop/03-insert-data/_c_opts_telnet.mdx diff --git a/docs-en/04-develop/03-insert-data/_c_sql.mdx b/docs-en/07-develop/03-insert-data/_c_sql.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_c_sql.mdx rename to docs-en/07-develop/03-insert-data/_c_sql.mdx diff --git a/docs-en/04-develop/03-insert-data/_c_stmt.mdx b/docs-en/07-develop/03-insert-data/_c_stmt.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_c_stmt.mdx rename to docs-en/07-develop/03-insert-data/_c_stmt.mdx diff --git a/docs-en/04-develop/03-insert-data/_category_.yml b/docs-en/07-develop/03-insert-data/_category_.yml similarity index 100% rename from docs-en/04-develop/03-insert-data/_category_.yml rename to docs-en/07-develop/03-insert-data/_category_.yml diff --git a/docs-en/04-develop/03-insert-data/_cs_line.mdx b/docs-en/07-develop/03-insert-data/_cs_line.mdx similarity index 100% rename from 
docs-en/04-develop/03-insert-data/_cs_line.mdx rename to docs-en/07-develop/03-insert-data/_cs_line.mdx diff --git a/docs-en/04-develop/03-insert-data/_cs_opts_json.mdx b/docs-en/07-develop/03-insert-data/_cs_opts_json.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_cs_opts_json.mdx rename to docs-en/07-develop/03-insert-data/_cs_opts_json.mdx diff --git a/docs-en/04-develop/03-insert-data/_cs_opts_telnet.mdx b/docs-en/07-develop/03-insert-data/_cs_opts_telnet.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_cs_opts_telnet.mdx rename to docs-en/07-develop/03-insert-data/_cs_opts_telnet.mdx diff --git a/docs-en/04-develop/03-insert-data/_cs_sql.mdx b/docs-en/07-develop/03-insert-data/_cs_sql.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_cs_sql.mdx rename to docs-en/07-develop/03-insert-data/_cs_sql.mdx diff --git a/docs-en/04-develop/03-insert-data/_cs_stmt.mdx b/docs-en/07-develop/03-insert-data/_cs_stmt.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_cs_stmt.mdx rename to docs-en/07-develop/03-insert-data/_cs_stmt.mdx diff --git a/docs-en/04-develop/03-insert-data/_go_line.mdx b/docs-en/07-develop/03-insert-data/_go_line.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_go_line.mdx rename to docs-en/07-develop/03-insert-data/_go_line.mdx diff --git a/docs-en/04-develop/03-insert-data/_go_opts_json.mdx b/docs-en/07-develop/03-insert-data/_go_opts_json.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_go_opts_json.mdx rename to docs-en/07-develop/03-insert-data/_go_opts_json.mdx diff --git a/docs-en/04-develop/03-insert-data/_go_opts_telnet.mdx b/docs-en/07-develop/03-insert-data/_go_opts_telnet.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_go_opts_telnet.mdx rename to docs-en/07-develop/03-insert-data/_go_opts_telnet.mdx diff --git a/docs-en/04-develop/03-insert-data/_go_sql.mdx b/docs-en/07-develop/03-insert-data/_go_sql.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_go_sql.mdx rename to docs-en/07-develop/03-insert-data/_go_sql.mdx diff --git a/docs-en/04-develop/03-insert-data/_go_stmt.mdx b/docs-en/07-develop/03-insert-data/_go_stmt.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_go_stmt.mdx rename to docs-en/07-develop/03-insert-data/_go_stmt.mdx diff --git a/docs-en/04-develop/03-insert-data/_java_line.mdx b/docs-en/07-develop/03-insert-data/_java_line.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_java_line.mdx rename to docs-en/07-develop/03-insert-data/_java_line.mdx diff --git a/docs-en/04-develop/03-insert-data/_java_opts_json.mdx b/docs-en/07-develop/03-insert-data/_java_opts_json.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_java_opts_json.mdx rename to docs-en/07-develop/03-insert-data/_java_opts_json.mdx diff --git a/docs-en/04-develop/03-insert-data/_java_opts_telnet.mdx b/docs-en/07-develop/03-insert-data/_java_opts_telnet.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_java_opts_telnet.mdx rename to docs-en/07-develop/03-insert-data/_java_opts_telnet.mdx diff --git a/docs-en/04-develop/03-insert-data/_java_sql.mdx b/docs-en/07-develop/03-insert-data/_java_sql.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_java_sql.mdx rename to docs-en/07-develop/03-insert-data/_java_sql.mdx diff --git a/docs-en/04-develop/03-insert-data/_java_stmt.mdx 
b/docs-en/07-develop/03-insert-data/_java_stmt.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_java_stmt.mdx rename to docs-en/07-develop/03-insert-data/_java_stmt.mdx diff --git a/docs-en/04-develop/03-insert-data/_js_line.mdx b/docs-en/07-develop/03-insert-data/_js_line.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_js_line.mdx rename to docs-en/07-develop/03-insert-data/_js_line.mdx diff --git a/docs-en/04-develop/03-insert-data/_js_opts_json.mdx b/docs-en/07-develop/03-insert-data/_js_opts_json.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_js_opts_json.mdx rename to docs-en/07-develop/03-insert-data/_js_opts_json.mdx diff --git a/docs-en/04-develop/03-insert-data/_js_opts_telnet.mdx b/docs-en/07-develop/03-insert-data/_js_opts_telnet.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_js_opts_telnet.mdx rename to docs-en/07-develop/03-insert-data/_js_opts_telnet.mdx diff --git a/docs-en/04-develop/03-insert-data/_js_sql.mdx b/docs-en/07-develop/03-insert-data/_js_sql.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_js_sql.mdx rename to docs-en/07-develop/03-insert-data/_js_sql.mdx diff --git a/docs-en/04-develop/03-insert-data/_js_stmt.mdx b/docs-en/07-develop/03-insert-data/_js_stmt.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_js_stmt.mdx rename to docs-en/07-develop/03-insert-data/_js_stmt.mdx diff --git a/docs-en/04-develop/03-insert-data/_py_line.mdx b/docs-en/07-develop/03-insert-data/_py_line.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_py_line.mdx rename to docs-en/07-develop/03-insert-data/_py_line.mdx diff --git a/docs-en/04-develop/03-insert-data/_py_opts_json.mdx b/docs-en/07-develop/03-insert-data/_py_opts_json.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_py_opts_json.mdx rename to docs-en/07-develop/03-insert-data/_py_opts_json.mdx diff --git a/docs-en/04-develop/03-insert-data/_py_opts_telnet.mdx b/docs-en/07-develop/03-insert-data/_py_opts_telnet.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_py_opts_telnet.mdx rename to docs-en/07-develop/03-insert-data/_py_opts_telnet.mdx diff --git a/docs-en/04-develop/03-insert-data/_py_sql.mdx b/docs-en/07-develop/03-insert-data/_py_sql.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_py_sql.mdx rename to docs-en/07-develop/03-insert-data/_py_sql.mdx diff --git a/docs-en/04-develop/03-insert-data/_py_stmt.mdx b/docs-en/07-develop/03-insert-data/_py_stmt.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_py_stmt.mdx rename to docs-en/07-develop/03-insert-data/_py_stmt.mdx diff --git a/docs-en/04-develop/03-insert-data/_rust_line.mdx b/docs-en/07-develop/03-insert-data/_rust_line.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_rust_line.mdx rename to docs-en/07-develop/03-insert-data/_rust_line.mdx diff --git a/docs-en/04-develop/03-insert-data/_rust_opts_json.mdx b/docs-en/07-develop/03-insert-data/_rust_opts_json.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_rust_opts_json.mdx rename to docs-en/07-develop/03-insert-data/_rust_opts_json.mdx diff --git a/docs-en/04-develop/03-insert-data/_rust_opts_telnet.mdx b/docs-en/07-develop/03-insert-data/_rust_opts_telnet.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_rust_opts_telnet.mdx rename to 
docs-en/07-develop/03-insert-data/_rust_opts_telnet.mdx diff --git a/docs-en/04-develop/03-insert-data/_rust_sql.mdx b/docs-en/07-develop/03-insert-data/_rust_sql.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_rust_sql.mdx rename to docs-en/07-develop/03-insert-data/_rust_sql.mdx diff --git a/docs-en/04-develop/03-insert-data/_rust_stmt.mdx b/docs-en/07-develop/03-insert-data/_rust_stmt.mdx similarity index 100% rename from docs-en/04-develop/03-insert-data/_rust_stmt.mdx rename to docs-en/07-develop/03-insert-data/_rust_stmt.mdx diff --git a/docs-en/07-develop/03-insert-data/index.md b/docs-en/07-develop/03-insert-data/index.md new file mode 100644 index 0000000000000000000000000000000000000000..1a71e719a56448e4b535632e570ce8a04d2282bb --- /dev/null +++ b/docs-en/07-develop/03-insert-data/index.md @@ -0,0 +1,12 @@ +--- +title: Insert Data +--- + +TDengine supports multiple protocols for inserting data, including SQL, InfluxDB Line protocol, OpenTSDB Telnet protocol, and OpenTSDB JSON protocol. Data can be inserted row by row, or in batches. Data from one or more collection points can be inserted simultaneously. Data can be inserted with multiple threads, and out-of-order data and historical data can be inserted as well. InfluxDB Line protocol, OpenTSDB Telnet protocol and OpenTSDB JSON protocol are the 3 kinds of schemaless insert protocols supported by TDengine. It's not necessary to create STables and tables in advance if using schemaless protocols, and the schemas can be adjusted automatically based on the data being inserted. + +```mdx-code-block +import DocCardList from '@theme/DocCardList'; +import {useCurrentSidebarCategory} from '@docusaurus/theme-common'; + +<DocCardList items={useCurrentSidebarCategory().items}/> +``` diff --git a/docs-en/04-develop/04-query-data/_c.mdx b/docs-en/07-develop/04-query-data/_c.mdx similarity index 100% rename from docs-en/04-develop/04-query-data/_c.mdx rename to docs-en/07-develop/04-query-data/_c.mdx diff --git a/docs-en/04-develop/04-query-data/_c_async.mdx b/docs-en/07-develop/04-query-data/_c_async.mdx similarity index 100% rename from docs-en/04-develop/04-query-data/_c_async.mdx rename to docs-en/07-develop/04-query-data/_c_async.mdx diff --git a/docs-en/07-develop/04-query-data/_category_.yml b/docs-en/07-develop/04-query-data/_category_.yml new file mode 100644 index 0000000000000000000000000000000000000000..809db34621a63505ceace7ba182e07c698bdbddb --- /dev/null +++ b/docs-en/07-develop/04-query-data/_category_.yml @@ -0,0 +1 @@ +label: Query Data diff --git a/docs-en/04-develop/04-query-data/_cs.mdx b/docs-en/07-develop/04-query-data/_cs.mdx similarity index 100% rename from docs-en/04-develop/04-query-data/_cs.mdx rename to docs-en/07-develop/04-query-data/_cs.mdx diff --git a/docs-en/04-develop/04-query-data/_cs_async.mdx b/docs-en/07-develop/04-query-data/_cs_async.mdx similarity index 100% rename from docs-en/04-develop/04-query-data/_cs_async.mdx rename to docs-en/07-develop/04-query-data/_cs_async.mdx diff --git a/docs-en/04-develop/04-query-data/_go.mdx b/docs-en/07-develop/04-query-data/_go.mdx similarity index 100% rename from docs-en/04-develop/04-query-data/_go.mdx rename to docs-en/07-develop/04-query-data/_go.mdx diff --git a/docs-en/04-develop/04-query-data/_go_async.mdx b/docs-en/07-develop/04-query-data/_go_async.mdx similarity index 100% rename from docs-en/04-develop/04-query-data/_go_async.mdx rename to docs-en/07-develop/04-query-data/_go_async.mdx diff --git a/docs-en/04-develop/04-query-data/_java.mdx b/docs-en/07-develop/04-query-data/_java.mdx
similarity index 100% rename from docs-en/04-develop/04-query-data/_java.mdx rename to docs-en/07-develop/04-query-data/_java.mdx diff --git a/docs-en/04-develop/04-query-data/_js.mdx b/docs-en/07-develop/04-query-data/_js.mdx similarity index 100% rename from docs-en/04-develop/04-query-data/_js.mdx rename to docs-en/07-develop/04-query-data/_js.mdx diff --git a/docs-en/04-develop/04-query-data/_js_async.mdx b/docs-en/07-develop/04-query-data/_js_async.mdx similarity index 100% rename from docs-en/04-develop/04-query-data/_js_async.mdx rename to docs-en/07-develop/04-query-data/_js_async.mdx diff --git a/docs-en/04-develop/04-query-data/_py.mdx b/docs-en/07-develop/04-query-data/_py.mdx similarity index 100% rename from docs-en/04-develop/04-query-data/_py.mdx rename to docs-en/07-develop/04-query-data/_py.mdx diff --git a/docs-en/04-develop/04-query-data/_py_async.mdx b/docs-en/07-develop/04-query-data/_py_async.mdx similarity index 100% rename from docs-en/04-develop/04-query-data/_py_async.mdx rename to docs-en/07-develop/04-query-data/_py_async.mdx diff --git a/docs-en/04-develop/04-query-data/_rust.mdx b/docs-en/07-develop/04-query-data/_rust.mdx similarity index 100% rename from docs-en/04-develop/04-query-data/_rust.mdx rename to docs-en/07-develop/04-query-data/_rust.mdx diff --git a/docs-en/04-develop/04-query-data/index.mdx b/docs-en/07-develop/04-query-data/index.mdx similarity index 56% rename from docs-en/04-develop/04-query-data/index.mdx rename to docs-en/07-develop/04-query-data/index.mdx index 4016f8453ba9e0679a2798b92cd40efcb926343b..a212fa9529215fc24c55c95a166cfc1a407359b2 100644 --- a/docs-en/04-develop/04-query-data/index.mdx +++ b/docs-en/07-develop/04-query-data/index.mdx @@ -1,6 +1,6 @@ --- -Sidebar_label: Select -title: Select +Sidebar_label: Query data +title: Query data description: "This chapter introduces major query functionalities and how to perform sync and async query using connectors." --- @@ -20,7 +20,7 @@ import CAsync from "./_c_async.mdx"; ## Introduction -SQL is used by TDengine as the query language. Application programs can send SQL statements to TDengine through REST API or connectors. TDengine CLI `taos` can also be used to execute SQL Ad-Hoc query. Here is the list of major query functionalities supported by TDengine: +SQL is used by TDengine as its query language. Application programs can send SQL statements to TDengine through REST API or connectors. TDengine's CLI `taos` can also be used to execute ad hoc SQL queries. Here is the list of major query functionalities supported by TDengine: - Query on single column or multiple columns - Filter on tags or data columns:>, <, =, <\>, like @@ -31,7 +31,7 @@ SQL is used by TDengine as the query language. Application programs can send SQL - Join query with timestamp alignment - Aggregate functions: count, max, min, avg, sum, twa, stddev, leastsquares, top, bottom, first, last, percentile, apercentile, last_row, spread, diff -For example, below SQL statement can be executed in TDengine CLI `taos` to select the rows whose voltage column is bigger than 215 and limit the output to only 2 rows. +For example, the SQL statement below can be executed in TDengine CLI `taos` to select records with voltage greater than 215 and limit the output to only 2 rows. 
```sql select * from d1001 where voltage > 215 order by ts desc limit 2; @@ -46,46 +46,46 @@ taos> select * from d1001 where voltage > 215 order by ts desc limit 2; Query OK, 2 row(s) in set (0.001100s) ``` -To meet the requirements in many use cases, some special functions have been added in TDengine, for example `twa` (Time Weighted Average), `spared` (The difference between the maximum and the minimum), `last_row` (the last row), more and more functions will be added to better perform in many use cases. Furthermore, continuous query is also supported in TDengine. +To meet the requirements of varied use cases, some special functions have been added in TDengine. Some examples are `twa` (Time Weighted Average), `spread` (The difference between the maximum and the minimum), and `last_row` (the last row). Furthermore, continuous query is also supported in TDengine. For detailed query syntax please refer to [Select](/taos-sql/select). ## Aggregation among Tables -In many use cases, there are always multiple kinds of data collection points. A new concept, called STable (abbreviated for super table), is used in TDengine to represent a kind of data collection points, and a table is used to represent a specific data collection point. Tags are used by TDengine to represent the static properties of data collection points. A specific data collection point has its own values for static properties. By specifying filter conditions on tags, aggregation can be performed efficiently among all the subtables created via the same STable, i.e. same kind of data collection points, can be. Aggregate functions applicable for tables can be used directly on STables, syntax is exactly same. +In most use cases, there are always multiple kinds of data collection points. A new concept, called STable (abbreviation for super table), is used in TDengine to represent one type of data collection point, and a subtable is used to represent a specific data collection point of that type. Tags are used by TDengine to represent the static properties of data collection points. A specific data collection point has its own values for static properties. By specifying filter conditions on tags, aggregation can be performed efficiently among all the subtables created via the same STable, i.e. same type of data collection points. Aggregate functions applicable for tables can be used directly on STables; the syntax is exactly the same. -In summary, for a STable, its subtables can be aggregated by a simple query on STable, it's kind of join operation. But tables belong to different STables could not be aggregated. +In summary, records across subtables can be aggregated by a simple query on their STable. It is like a join operation. However, tables belonging to different STables can not be aggregated. ### Example 1 -In TDengine CLI `taos`, use below SQL to get the average voltage of all the meters in BeiJing grouped by location. +In TDengine CLI `taos`, use the SQL below to get the average voltage of all the meters in California grouped by location. 
``` taos> SELECT AVG(voltage) FROM meters GROUP BY location; avg(voltage) | location | ============================================================= - 222.000000000 | Beijing.Haidian | - 219.200000000 | Beijing.Chaoyang | + 222.000000000 | California.LosAngeles | + 219.200000000 | California.SanFrancisco | Query OK, 2 row(s) in set (0.002136s) ``` ### Example 2 -In TDengine CLI `taos`, use below SQL to get the number of rows and the maximum current in the past 24 hours from meters whose groupId is 2. +In TDengine CLI `taos`, use the SQL below to get the number of rows and the maximum current in the past 24 hours from meters whose groupId is 2. ``` taos> SELECT count(*), max(current) FROM meters where groupId = 2 and ts > now - 24h; - cunt(*) | max(current) | + count(*) | max(current) | ================================== 5 | 13.4 | Query OK, 1 row(s) in set (0.002136s) ``` -Join query is allowed between only the tables of same STable. In [Select](/taos-sql/select), all query operations are marked as whether it supports STable or not. +Join queries are only allowed between subtables of the same STable. In [Select](/taos-sql/select), all query operations are marked as to whether they support STables or not. ## Down Sampling and Interpolation -In IoT use cases, down sampling is widely used to aggregate the data by time range. `INTERVAL` keyword in TDengine can be used to simplify the query by time window. For example, below SQL statement can be used to get the sum of current every 10 seconds from meters table d1001. +In IoT use cases, down sampling is widely used to aggregate data by time range. The `INTERVAL` keyword in TDengine can be used to simplify the query by time window. For example, the SQL statement below can be used to get the sum of current every 10 seconds from meters table d1001. ``` taos> SELECT sum(current) FROM d1001 INTERVAL(10s); @@ -96,10 +96,10 @@ taos> SELECT sum(current) FROM d1001 INTERVAL(10s); Query OK, 2 row(s) in set (0.000883s) ``` -Down sampling can also be used for STable. For example, below SQL statement can be used to get the sum of current from all meters in BeiJing. +Down sampling can also be used for STable. For example, the below SQL statement can be used to get the sum of current from all meters in California. ``` -taos> SELECT SUM(current) FROM meters where location like "Beijing%" INTERVAL(1s); +taos> SELECT SUM(current) FROM meters where location like "California%" INTERVAL(1s); ts | sum(current) | ====================================================== 2018-10-03 14:38:04.000 | 10.199999809 | @@ -110,7 +110,7 @@ taos> SELECT SUM(current) FROM meters where location like "Beijing%" INTERVAL(1s Query OK, 5 row(s) in set (0.001538s) ``` -Down sampling also supports time offset. For example, below SQL statement can be used to get the sum of current from all meters but each time window must start at the boundary of 500 milliseconds. +Down sampling also supports time offset. For example, the below SQL statement can be used to get the sum of current from all meters but each time window must start at the boundary of 500 milliseconds. ``` taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a); @@ -124,7 +124,7 @@ taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a); Query OK, 5 row(s) in set (0.001521s) ``` -In many use cases, it's hard to align the timestamp of the data collected by each collection point. 
However, a lot of algorithms like FFT require the data to be aligned with same time interval and application programs have to handle by themselves in many systems. In TDengine, it's easy to achieve the alignment using down sampling.
+In many use cases, it's hard to align the timestamps of the data collected by each collection point. However, a lot of algorithms like FFT require the data to be aligned with the same time interval, and application programs have to handle this by themselves. In TDengine, it's easy to achieve the alignment using down sampling.

Interpolation can be performed in TDengine if there is no data in a time range.
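+
+For example, assuming the table `d1001` used above, the SQL statement below is a minimal sketch of down sampling combined with interpolation: empty 10-second windows inside the given time range are filled with linearly interpolated values. `FILL` also supports other modes such as `PREV`, `NULL` and `VALUE`; for details please refer to [Select](/taos-sql/select).
+
+```sql
+SELECT AVG(voltage) FROM d1001 WHERE ts >= '2018-10-03 14:38:00.000' AND ts <= '2018-10-03 14:39:00.000' INTERVAL(10s) FILL(LINEAR);
+```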
@@ -162,16 +162,16 @@ In the section describing [Insert](/develop/insert-data/sql-writing), a database

:::note

-1. With either REST connection or native connection, the above sample code work well.
-2. Please be noted that `use db` can't be used in case of REST connection because it's stateless.
+1. With either a REST connection or a native connection, the above sample code works well.
+2. Please note that `use db` can't be used in the case of a REST connection because it's stateless.

:::

### Asynchronous Query

-Besides synchronous query, asynchronous query API is also provided by TDengine to insert or query data more efficiently. With similar hardware and software environment, async API is 2~4 times faster than sync APIs. Async API works in non-blocking mode, which means an operation can be returned without finishing so that the calling thread can switch to other works to improve the performance of the whole application system. Async APIs perform especially better in case of poor network.
+Besides synchronous queries, an asynchronous query API is also provided by TDengine to insert or query data more efficiently. With a similar hardware and software environment, the async API is 2~4 times faster than the sync APIs. The async API works in non-blocking mode, which means an operation can return before it is finished, so that the calling thread can switch to other work to improve the performance of the whole application system. Async APIs perform especially well under poor network conditions.

-Please be noted that async query can only be used with native connection.
+Please note that async queries can only be used with a native connection.
diff --git a/docs-en/07-develop/05-continuous-query.mdx b/docs-en/07-develop/05-continuous-query.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..1aea5783fc8116a4e02a4b5345d341707cd399ea
--- /dev/null
+++ b/docs-en/07-develop/05-continuous-query.mdx
@@ -0,0 +1,83 @@
+---
+sidebar_label: Continuous Query
+description: "Continuous query is a query that's executed automatically at a predefined frequency to provide aggregate query capability by time window. It is essentially simplified, time driven, stream computing."
+title: "Continuous Query"
+---
+
+A continuous query is a query that's executed automatically at a predefined frequency to provide aggregate query capability by time window. It is essentially simplified, time-driven stream computing. A continuous query can be performed on a table or STable in TDengine. The results of a continuous query can be pushed to clients or written back to TDengine. Each query is executed on a time window, which moves forward with time. The size of the time window and the forward sliding time need to be specified with the parameters `INTERVAL` and `SLIDING` respectively.
+
+A continuous query in TDengine is time driven, and can be defined using TAOS SQL directly without any extra operations. With a continuous query, the result can be generated based on a time window to achieve down sampling of the original data. Once a continuous query is defined using TAOS SQL, the query is automatically executed at the end of each time window and the result is pushed back to clients or written to TDengine.
+
+There are some differences between a continuous query in TDengine and the time window computation in stream computing:
+
+- The computation is performed and the result is returned in real time in stream computing, but the computation in a continuous query is only started when a time window closes. For example, if the time window is 1 day, then the result will only be generated at 23:59:59.
+- If a historical data row is written into a time window for which the computation has already finished, the computation will not be performed again and the result will not be pushed to client applications again. If the results have already been written into TDengine, they will not be updated.
+- In a continuous query, if the result is pushed to a client, the client status is not cached on the server side and Exactly-once is not guaranteed by the server. If the client program crashes, a new time window will be generated from the time where the continuous query is restarted. If the result is written into TDengine, the data written into TDengine can be guaranteed as valid and continuous.
+
+## Syntax
+
+```sql
+[CREATE TABLE AS] SELECT select_expr [, select_expr ...]
+    FROM {tb_name_list}
+    [WHERE where_condition]
+    [INTERVAL(interval_val [, interval_offset]) [SLIDING sliding_val]]
+
+```
+
+INTERVAL: The time window for which the continuous query is performed
+
+SLIDING: The time step for which the time window moves forward each time
+
+## How to Use
+
+In this section, the use case of meters will be used to introduce how to use continuous query. Assume the STable and subtables have been created using the SQL statements below.
+
+```sql
+create table meters (ts timestamp, current float, voltage int, phase float) tags (location binary(64), groupId int);
+create table D1001 using meters tags ("California.SanFrancisco", 2);
+create table D1002 using meters tags ("California.LosAngeles", 2);
+```
+
+The SQL statement below retrieves the average voltage for a one minute time window, with each time window moving forward by 30 seconds.
+
+```sql
+select avg(voltage) from meters interval(1m) sliding(30s);
+```
+
+Whenever the above SQL statement is executed, all the existing data will be computed again. If the computation needs to be performed every 30 seconds automatically to compute on the data in the past one minute, the above SQL statement needs to be revised as below, in which `{startTime}` stands for the beginning timestamp in the latest time window.
+
+```sql
+select avg(voltage) from meters where ts > {startTime} interval(1m) sliding(30s);
+```
+
+An easier way to achieve this is to prepend `create table {tableName} as` before the `select`.
+
+```sql
+create table avg_vol as select avg(voltage) from meters interval(1m) sliding(30s);
+```
+
+A table named `avg_vol` will be created automatically, then every 30 seconds the `select` statement will be executed automatically on the data in the past 1 minute, i.e. the latest time window, and the result is written into table `avg_vol`. The client program just needs to query from table `avg_vol`.
For example:
+
+```sql
+taos> select * from avg_vol;
+            ts           |        avg_voltage_    |
+===================================================
+ 2020-07-29 13:37:30.000 |            222.0000000 |
+ 2020-07-29 13:38:00.000 |            221.3500000 |
+ 2020-07-29 13:38:30.000 |            220.1700000 |
+ 2020-07-29 13:39:00.000 |            223.0800000 |
+```
+
+Please note that the minimum allowed time window is 10 milliseconds, and there is no upper limit.
+
+It's possible to specify the start and end time of a continuous query. If the start time is not specified, the timestamp of the first row will be considered as the start time; if the end time is not specified, the continuous query will be performed indefinitely; otherwise, it will be terminated once the end time is reached. For example, the continuous query in the SQL statement below will be started now and terminated one hour later.
+
+```sql
+create table avg_vol as select avg(voltage) from meters where ts > now and ts <= now + 1h interval(1m) sliding(30s);
+```
+
+`now` in the above SQL statement stands for the time when the continuous query is created, not the time when the computation is actually performed. To minimize the impact of a delay in receiving data, the actual computation in a continuous query is started after a little delay. That means, once a time window closes, the computation is not started immediately. The results are normally available a short time, usually within one minute, after the time window closes.
+
+## How to Manage
+
+The `show streams` command can be used in the TDengine CLI `taos` to show all the continuous queries in the system, and `kill stream` can be used to terminate a continuous query.
diff --git a/docs-en/07-develop/06-subscribe.mdx b/docs-en/07-develop/06-subscribe.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..782fcdbaf221419dd231bd10958e26b8f4f856e5
--- /dev/null
+++ b/docs-en/07-develop/06-subscribe.mdx
@@ -0,0 +1,259 @@
+---
+sidebar_label: Data Subscription
+description: "Lightweight service for data subscription and publishing. Time series data inserted into TDengine continuously can be pushed automatically to subscribing clients."
+title: Data Subscription
+---
+
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+import Java from "./_sub_java.mdx";
+import Python from "./_sub_python.mdx";
+import Go from "./_sub_go.mdx";
+import Rust from "./_sub_rust.mdx";
+import Node from "./_sub_node.mdx";
+import CSharp from "./_sub_cs.mdx";
+import CDemo from "./_sub_c.mdx";
+
+## Introduction
+
+Due to the nature of time series data, data insertion into TDengine is similar to data publishing in message queues. Data is stored in ascending order of timestamp inside TDengine, and so each table in TDengine can essentially be considered as a message queue.
+
+A lightweight service for data subscription and publishing is built into TDengine. With the API provided by TDengine, client programs can use `select` statements to subscribe to data from one or more tables. The subscription and state maintenance is performed on the client side. The client programs poll the server to check whether there is new data, and if so the new data will be pushed back to the client side. If the client program is restarted, where to start retrieving new data is up to the client side.
+
+There are 3 major APIs related to subscription provided in the TDengine client driver.
+
+```c
+taos_subscribe
+taos_consume
+taos_unsubscribe
+```
+
+For more details about these APIs please refer to [C/C++ Connector](/reference/connector/cpp). Their usage will be introduced below using the use case of meters, in which the schema of the STable and subtables from the previous section [Continuous Query](/develop/continuous-query) is used. Full sample code can be found [here](https://github.com/taosdata/TDengine/blob/master/examples/c/subscribe.c).
+
+If we want to be notified and take some action when the current of some meters exceeds a threshold, like 10A, there are two ways:
+
+The first way is to query each subtable and record the last timestamp matching the criteria. Then, after some time, query the data later than the recorded timestamp, and repeat this process. The SQL statements for this way are as below.
+
+```sql
+select * from D1001 where ts > {last_timestamp1} and current > 10;
+select * from D1002 where ts > {last_timestamp2} and current > 10;
+...
+```
+
+The above way works, but the problem is that the number of `select` statements increases with the number of meters. Additionally, the performance of both the client side and the server side will become unacceptable once the number of meters grows large.
+
+A better way is to query on the STable; only one `select` is needed regardless of the number of meters, as below:
+
+```sql
+select * from meters where ts > {last_timestamp} and current > 10;
+```
+
+However, this presents a new problem in how to choose `last_timestamp`. First, the timestamp when the data is generated is different from the timestamp when the data is inserted into the database, and sometimes the difference between them may be very big. Second, the time when the data from different meters arrives at the database may be different too. If the timestamp of the "slowest" meter is used as `last_timestamp` in the query, the data from other meters may be selected repeatedly; but if the timestamp of the "fastest" meter is used as `last_timestamp`, some data from other meters may be missed.
+
+All the problems mentioned above can be resolved easily using the subscription functionality provided by TDengine.
+
+The first step is to create a subscription using `taos_subscribe`.
+
+```c
+TAOS_SUB* tsub = NULL;
+if (async) {
+  // create an asynchronous subscription; the callback function will be called every 1s
+  tsub = taos_subscribe(taos, restart, topic, sql, subscribe_callback, &blockFetch, 1000);
+} else {
+  // create a synchronous subscription; 'taos_consume' needs to be called manually
+  tsub = taos_subscribe(taos, restart, topic, sql, NULL, NULL, 0);
+}
+```
+
+The subscription in TDengine can be either synchronous or asynchronous. In the above sample code, the value of the variable `async` is determined from the CLI input, then it's used to create either an async or sync subscription. Sync subscription means the client program needs to invoke `taos_consume` to retrieve data, and async subscription means another thread created by `taos_subscribe` internally invokes `taos_consume` to retrieve data and pass the data to `subscribe_callback` for processing. `subscribe_callback` is a callback function provided by the client program. You should not perform time-consuming operations in the callback function.
+
+The parameter `taos` is an established connection. Nothing special needs to be done for thread safety for synchronous subscription.
For asynchronous subscription, the `taos_subscribe` function should be called exclusively by the current thread, to avoid unpredictable errors.
+
+The parameter `sql` is a `select` statement in which the `where` clause can be used to specify filter conditions. In our example, we can subscribe to the records in which the current exceeds 10A, with the following SQL statement:
+
+```sql
+select * from meters where current > 10;
+```
+
+Please note that all the data will be processed because no start time is specified. If we only want to process data for the past day, a time-related condition can be added:
+
+```sql
+select * from meters where ts > now - 1d and current > 10;
+```
+
+The parameter `topic` is the name of the subscription. The client application must guarantee that the name is unique. However, it doesn't have to be globally unique because subscription is implemented in the APIs on the client side.
+
+If the subscription named `topic` doesn't exist, the parameter `restart` will be ignored. If the subscription named `topic` has been created before by the client program, when the client program is restarted with the subscription named `topic`, the parameter `restart` is used to determine whether to retrieve data from the beginning or from the last point where the subscription was broken.
+
+If the value of `restart` is **true** (i.e. a non-zero value), data will be retrieved from the beginning. If it is **false** (i.e. zero), the data already consumed before will not be processed again.
+
+The last parameter of `taos_subscribe` is the polling interval in milliseconds. In sync mode, if the time difference between two consecutive invocations of `taos_consume` is smaller than the interval specified in `taos_subscribe`, `taos_consume` will be blocked until the interval is reached. In async mode, this interval is the minimum interval between two invocations of the callback function.
+
+The second-to-last parameter of `taos_subscribe` is used to pass arguments to the callback function. `taos_subscribe` doesn't process this parameter and simply passes it to the callback function. This parameter is simply ignored in sync mode.
+
+After a subscription is created, its data can be consumed and processed. Shown below is the sample code to consume data in sync mode, in the else condition of `if (async)`.
+
+```c
+if (async) {
+  getchar();
+} else while(1) {
+  TAOS_RES* res = taos_consume(tsub);
+  if (res == NULL) {
+    printf("failed to consume data.");
+    break;
+  } else {
+    print_result(res, blockFetch);
+    getchar();
+  }
+}
+```
+
+In the above sample code in the else condition, there is an infinite loop. Each time a carriage return is entered, `taos_consume` is invoked. The return value of `taos_consume` is the selected result set. In the above sample, `print_result` is used to simplify the printing of the result set. It is similar to `taos_use_result`. Below is the implementation of `print_result`.
+
+```c
+void print_result(TAOS_RES* res, int blockFetch) {
+  TAOS_ROW row = NULL;
+  int num_fields = taos_num_fields(res);
+  TAOS_FIELD* fields = taos_fetch_fields(res);
+  int nRows = 0;
+  if (blockFetch) {
+    nRows = taos_fetch_block(res, &row);
+    for (int i = 0; i < nRows; i++) {
+      char temp[256];
+      taos_print_row(temp, row + i, fields, num_fields);
+      puts(temp);
+    }
+  } else {
+    while ((row = taos_fetch_row(res))) {
+      char temp[256];
+      taos_print_row(temp, row, fields, num_fields);
+      puts(temp);
+      nRows++;
+    }
+  }
+  printf("%d rows consumed.\n", nRows);
+}
+```
+
+In the above code, `taos_print_row` is used to process the data consumed. All matching rows are printed.
+
+In async mode, consuming data is simpler, as shown below.
+
+```c
+void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
+  print_result(res, *(int*)param);
+}
+```
+
+`taos_unsubscribe` can be invoked to terminate a subscription.
+
+```c
+taos_unsubscribe(tsub, keep);
+```
+
+The second parameter, `keep`, is used to specify whether to keep the subscription progress on the client side. If it is **false**, i.e. **0**, the subscription will be restarted from the beginning regardless of the `restart` parameter's value when `taos_subscribe` is invoked again. The subscription progress information is stored in _{DataDir}/subscribe/_, under which there is a file with the same name as the `topic` for each subscription. (Note: the default value of `DataDir` in the `taos.cfg` file is **/var/lib/taos/**. However, **/var/lib/taos/** does not exist on a Windows server, so you need to change the `DataDir` value to an existing directory.) The subscription will be restarted from the beginning if the corresponding progress file is removed.
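+
+For example, assuming the default `dataDir` on Linux and a subscription whose `topic` is `meters-over-10a` (a hypothetical name), the command below is a sketch of how to remove the progress file so that the subscription restarts from the beginning on the next `taos_subscribe` call:
+
+```bash
+rm -f /var/lib/taos/subscribe/meters-over-10a
+```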
+
+Now let's see the effect of the above sample code, assuming the prerequisites below have been met.
+
+- The sample code has been downloaded to the local system
+- TDengine has been installed and launched properly on the same system
+- The database, STable, and subtables required in the sample code are ready
+
+Launch the command below in the directory where the sample code resides to compile and start the program.
+
+```bash
+make
+./subscribe -sql='select * from meters where current > 10;'
+```
+
+After the program is started, open another terminal and launch TDengine CLI `taos`, then use the below SQL commands to insert a row whose current is 12A into table **D1001**.
+
+```sql
+use test;
+insert into D1001 values(now, 12, 220, 1);
+```
+
+Then, this row of data will be shown by the example program on the first terminal because its current exceeds 10A. More data can be inserted for you to observe the output of the example program.
+
+## Examples
+
+The example program below demonstrates how to subscribe, using connectors, to data rows in which current exceeds 10A.
+
+### Prepare Data
+
+```bash
+# create database "power"
+taos> create database power;
+# use "power" as the database in following operations
+taos> use power;
+# create super table "meters"
+taos> create table meters(ts timestamp, current float, voltage int, phase int) tags(location binary(64), groupId int);
+# create tables using the schema defined by super table "meters"
+taos> create table d1001 using meters tags ("California.SanFrancisco", 2);
+taos> create table d1002 using meters tags ("California.LosAngeles", 2);
+# insert some rows
+taos> insert into d1001 values("2020-08-15 12:00:00.000", 12, 220, 1),("2020-08-15 12:10:00.000", 12.3, 220, 2),("2020-08-15 12:20:00.000", 12.2, 220, 1);
+taos> insert into d1002 values("2020-08-15 12:00:00.000", 9.9, 220, 1),("2020-08-15 12:10:00.000", 10.3, 220, 1),("2020-08-15 12:20:00.000", 11.2, 220, 1);
+# filter out the rows in which current is bigger than 10A
+taos> select * from meters where current > 10;
+           ts            |    current    |   voltage   |  phase  |         location          |   groupid   |
+===========================================================================================================
+ 2020-08-15 12:10:00.000 |      10.30000 |         220 |       1 | California.LosAngeles     |           2 |
+ 2020-08-15 12:20:00.000 |      11.20000 |         220 |       1 | California.LosAngeles     |           2 |
+ 2020-08-15 12:00:00.000 |      12.00000 |         220 |       1 | California.SanFrancisco   |           2 |
+ 2020-08-15 12:10:00.000 |      12.30000 |         220 |       2 | California.SanFrancisco   |           2 |
+ 2020-08-15 12:20:00.000 |      12.20000 |         220 |       1 | California.SanFrancisco   |           2 |
+Query OK, 5 row(s) in set (0.004896s)
+```
+
+### Example Programs
+
+{/* Tabs with the example program for each language (Java, Python, Go, Rust, Node.js, C#, C) are rendered here from the MDX components imported above. */}
+
+### Run the Examples
+
+The example programs first consume all historical data matching the criteria.
+
+```bash
+ts: 1597464000000 current: 12.0 voltage: 220 phase: 1 location: California.SanFrancisco groupid : 2
+ts: 1597464600000 current: 12.3 voltage: 220 phase: 2 location: California.SanFrancisco groupid : 2
+ts: 1597465200000 current: 12.2 voltage: 220 phase: 1 location: California.SanFrancisco groupid : 2
+ts: 1597464600000 current: 10.3 voltage: 220 phase: 1 location: California.LosAngeles groupid : 2
+ts: 1597465200000 current: 11.2 voltage: 220 phase: 1 location: California.LosAngeles groupid : 2
+```
+
+Next, use TDengine CLI to insert a new row.
+
+```
+# taos
+taos> use power;
+taos> insert into d1001 values(now, 12.4, 220, 1);
+```
+
+Because the current in the inserted row exceeds 10A, it will be consumed by the example program.
+
+```
+ts: 1651146662805 current: 12.4 voltage: 220 phase: 1 location: California.SanFrancisco groupid: 2
+```
diff --git a/docs-en/07-develop/07-cache.md b/docs-en/07-develop/07-cache.md
new file mode 100644
index 0000000000000000000000000000000000000000..743452faff6a2be8466318a7dab61a44e33c3664
--- /dev/null
+++ b/docs-en/07-develop/07-cache.md
@@ -0,0 +1,19 @@
+---
+sidebar_label: Cache
+title: Cache
+description: "The latest row of each table is kept in cache to provide high performance query of latest state."
+---
+
+The cache management policy in TDengine is First-In-First-Out (FIFO). FIFO is also known as an insert-driven cache management policy, and it is different from read-driven cache management, which is more commonly known as Least-Recently-Used (LRU). FIFO simply stores the latest data in cache and flushes the oldest data in cache to disk when the cache usage reaches a threshold. In IoT use cases, it is the current state, i.e. the latest or most recent data, that is important. The cache policy in TDengine, like much of the design and architecture of TDengine, is based on the nature of IoT data.
+
+Caching the latest data provides the capability of retrieving data in milliseconds. With this capability, TDengine can be configured properly to be used as a caching system without deploying another separate caching system. This simplifies the system architecture and minimizes operational costs. The cache is emptied after TDengine is restarted; unlike a key-value caching system, TDengine does not reload data from disk into cache.
+
+The memory space used by the TDengine cache is fixed in size and configurable. It should be allocated based on application requirements and system resources. An independent memory pool is allocated for and managed by each vnode (virtual node) in TDengine. There is no sharing of memory pools between vnodes. All the tables belonging to a vnode share all the cache memory of the vnode.
+
+The memory pool is divided into blocks, data is stored in row format in memory, and each block follows the FIFO policy. The size of each block is determined by the configuration parameter `cache` and the number of blocks for each vnode is determined by the parameter `blocks`. For each vnode, the total cache size is `cache * blocks`. To be efficient, a cache block needs to ensure that each table can store at least dozens of records.
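+
+For illustration, assuming the common default values of 16 (MB) for `cache` and 6 for `blocks` (please check the defaults of your version), each vnode gets 16 * 6 = 96 MB of cache. The two parameters can also be set per database when it is created; the numbers in the sketch below are only illustrative:
+
+```sql
+create database demo cache 16 blocks 6;
+```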
+
+The `last_row` function can be used to retrieve the last row of a table or a STable to quickly show the current state of devices on a monitoring screen. For example, the below SQL statement retrieves the latest voltage of all meters in San Francisco, California.
+
+```sql
+select last_row(voltage) from meters where location='California.SanFrancisco';
+```
diff --git a/docs-en/07-develop/08-udf.md b/docs-en/07-develop/08-udf.md
new file mode 100644
index 0000000000000000000000000000000000000000..49bc95bd91a4c31d42d2b21ef05d69225f1bd963
--- /dev/null
+++ b/docs-en/07-develop/08-udf.md
@@ -0,0 +1,240 @@
+---
+sidebar_label: UDF
+title: User Defined Functions(UDF)
+description: "Scalar functions and aggregate functions developed by users can be utilized by the query framework to expand query capability"
+---
+
+In some use cases, built-in functions are not adequate for the query capability required by application programs. With UDF, the functions developed by users can be utilized by the query framework to meet business and application requirements. A UDF normally takes one column of data as input, but can also support the result of a sub-query as input.
+
+From version 2.2.0.0, UDFs written in C/C++ are supported by TDengine.
+
+
+## Types of UDF
+
+Two kinds of functions can be implemented by UDF: scalar functions and aggregate functions.
+
+A scalar function returns one output value for each row of input, while an aggregate function returns either 0 or 1 row of output for its whole input.
+
+In the case of a scalar function you only have to implement the "normal" function template.
+
+In the case of an aggregate function, in addition to the "normal" function, you also need to implement the "merge" and "finalize" function templates even if the implementation is empty. This will become clear in the sections below.
+
+### Scalar Function
+
+As mentioned earlier, a scalar UDF only has to implement the "normal" function template. The function template below can be used to define your own scalar function.
+
+`void udfNormalFunc(char* data, short itype, short ibytes, int numOfRows, long long* ts, char* dataOutput, char* interBuf, char* tsOutput, int* numOfOutput, short otype, short obytes, SUdfInit* buf)`
+
+`udfNormalFunc` is the placeholder for a function name. A function implemented based on the above template can be used to perform scalar computation on data rows. The parameters are fixed to control the data exchange between UDF and TDengine.
+
+- Definitions of the parameters:
+
+  - data: input data
+  - itype: the type of input data, for details please refer to [type definition in column_meta](/reference/rest-api/), for example 4 represents INT
+  - ibytes: the number of bytes consumed by each value in the input data
+  - otype: the type of output data, similar to itype
+  - obytes: the number of bytes consumed by each value in the output data
+  - numOfRows: the number of rows in the input data
+  - ts: the column of timestamps corresponding to the input data
+  - dataOutput: the buffer for output data, total size is `obytes * numberOfRows`
+  - interBuf: the buffer for an intermediate result. Its size is specified by the `BUFSIZE` parameter when creating a UDF. It's normally used when the intermediate result is not the same as the final result. This buffer is allocated and freed by TDengine.
+  - tsOutput: the column of timestamps corresponding to the output data; it can be used to output timestamps together with the output data if it's not NULL
+  - numOfOutput: the number of rows in the output data
+  - buf: for the state exchange between UDF and TDengine
+
+  [add_one.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/add_one.c) is one example of a very simple UDF implementation, i.e. one instance of the above `udfNormalFunc` template. It adds one to each value of a passed-in column, which can be filtered using the `where` clause, and outputs the result.
+
+### Aggregate Function
+
+For an aggregate UDF, as mentioned earlier you must implement the "normal" function template (described above) and also implement the "merge" and "finalize" templates.
+
+#### Merge Function Template
+
+The function template below can be used to define your own merge function for an aggregate UDF.
+
+`void udfMergeFunc(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf)`
+
+`udfMergeFunc` is the placeholder for a function name. The function implemented with the above template is used to aggregate intermediate results and can only be used in an aggregate query on a STable.
+
+Definitions of the parameters:
+
+- data: array of output data; if interBuf is used, it's an array of interBuf
+- numOfRows: number of rows in `data`
+- dataOutput: the buffer for output data, the size is the same as that of the final result; if the result is not final, it can be put in the interBuf, i.e. `data`.
+- numOfOutput: number of rows in the output data
+- buf: for the state exchange between UDF and TDengine
+
+#### Finalize Function Template
+
+The function template below can be used to finalize the result of your own UDF, normally used when interBuf is used.
+
+`void udfFinalizeFunc(char* dataOutput, char* interBuf, int* numOfOutput, SUdfInit* buf)`
+
+`udfFinalizeFunc` is the placeholder for a function name; the definitions of the parameters are as below:
+
+- dataOutput: buffer for output data
+- interBuf: buffer for intermediate result, can be used as input for the next processing step
+- numOfOutput: number of output rows, can only be 0 or 1 for an aggregate function
+- buf: for state exchange between UDF and TDengine
+
+### Example abs_max.c
+
+[abs_max.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/abs_max.c) is an example of a user defined aggregate function to get the maximum from the absolute values of a column.
+
+The internal processing happens as follows. The results of the select statement are divided into multiple row blocks and `udfNormalFunc`, i.e. `abs_max` in this case, is performed on each row block to generate the intermediate results for each subtable. Then `udfMergeFunc`, i.e. `abs_max_merge` in this case, is performed on the intermediate results of the subtables to aggregate and generate the final or intermediate result of the STable. The intermediate result of the STable is finally processed by `udfFinalizeFunc`, i.e. `abs_max_finalize` in this example, to generate the final result, which contains either 0 or 1 row.
+
+Other typical aggregation functions, such as covariance, can also be implemented using an aggregate UDF.
+
+## UDF Naming Conventions
+
+The naming convention for the 3 kinds of function templates required by UDF is as follows:
+ - udfNormalFunc, udfMergeFunc, and udfFinalizeFunc are required to have the same prefix, i.e. the actual name of udfNormalFunc. The udfNormalFunc doesn't need a suffix following the function name.
+ - udfMergeFunc should be udfNormalFunc followed by `_merge`
+ - udfFinalizeFunc should be udfNormalFunc followed by `_finalize`.
+
+The naming convention is part of TDengine's UDF framework. TDengine follows this convention to invoke the corresponding actual functions.
+
+Depending on whether you are creating a scalar UDF or an aggregate UDF, the functions that you need to implement are different.
+
+- Scalar function: udfNormalFunc is required.
+- Aggregate function: udfNormalFunc, udfMergeFunc (if querying on a STable) and udfFinalizeFunc are required.
+
+For clarity, assuming we want to implement a UDF named "foo":
+- If the function is a scalar function, we only need to implement the "normal" function template and it should be named simply `foo`.
+- If the function is an aggregate function, we need to implement `foo`, `foo_merge`, and `foo_finalize`, as sketched below. Note that for an aggregate UDF, even though one of the three functions is not necessary, there must be an empty implementation.
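+
+For illustration, the declarations below sketch the three exported symbols for the hypothetical aggregate UDF `foo`, reusing the parameter lists of the templates described above:
+
+```c
+// "normal" function: computes intermediate results on each block of rows
+void foo(char* data, short itype, short ibytes, int numOfRows, long long* ts,
+         char* dataOutput, char* interBuf, char* tsOutput, int* numOfOutput,
+         short otype, short obytes, SUdfInit* buf);
+
+// merge function: aggregates intermediate results (may be an empty implementation)
+void foo_merge(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf);
+
+// finalize function: produces the final 0 or 1 row (may be an empty implementation)
+void foo_finalize(char* dataOutput, char* interBuf, int* numOfOutput, SUdfInit* buf);
+```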
+
+## Compile UDF
+
+The source code of a UDF in C can't be utilized by TDengine directly. A UDF can only be loaded into TDengine after being compiled into a dynamically linked library (DLL).
+
+For example, the example UDF `add_one.c` mentioned earlier can be compiled into a DLL using the command below, in a Linux shell.
+
+```bash
+gcc -g -O0 -fPIC -shared add_one.c -o add_one.so
+```
+
+The generated DLL file `add_one.so` can be used later when creating a UDF. It's recommended to use GCC not older than 7.5.
+
+## Create and Use UDF
+
+When a UDF is created in a TDengine instance, it is available across the databases in that instance.
+
+### Create UDF
+
+The SQL command can be executed on the host where the generated UDF DLL resides to load the UDF DLL into TDengine. This operation cannot be done through the REST interface or web console. Once created, any client of the current TDengine can use these UDF functions in their SQL commands. UDFs are stored in the management node of TDengine. The UDFs loaded in TDengine will still be available after TDengine is restarted.
+
+When creating a UDF, the type of UDF, i.e. a scalar function or an aggregate function, must be specified. If the specified type is wrong, the SQL statements using the function would fail with errors. The input type and output type don't need to be the same in a UDF, but the input data type and output data type must be consistent with the UDF definition.
+
+- Create Scalar Function
+
+```sql
+CREATE FUNCTION userDefinedFunctionName AS "/absolute/path/to/userDefinedFunctionName.so" OUTPUTTYPE <outputtype> [BUFSIZE B];
+```
+
+- userDefinedFunctionName: the function name to be used in SQL statements, which must be consistent with the function name defined by `udfNormalFunc` and is also the name of the compiled DLL (.so file).
+- path: the absolute path of the DLL file including the name of the shared object file (.so). The path must be quoted with single or double quotes.
+- outputtype: the output data type, the value is the literal string of the supported TDengine data type.
+- B: the size of the intermediate buffer, in bytes; it is an optional parameter and the range is [0,512].
+
+For example, the SQL statement below can be used to create a UDF from `add_one.so`.
+
+```sql
+CREATE FUNCTION add_one AS "/home/taos/udf_example/add_one.so" OUTPUTTYPE INT;
+```
+
+- Create Aggregate Function
+
+```sql
+CREATE AGGREGATE FUNCTION userDefinedFunctionName AS "/absolute/path/to/userDefinedFunctionName.so" OUTPUTTYPE <outputtype> [ BUFSIZE B ];
+```
+
+- userDefinedFunctionName: the function name to be used in SQL statements, which must be consistent with the function name defined by `udfNormalFunc` and is also the name of the compiled DLL (.so file).
+- path: the absolute path of the DLL file including the name of the shared object file (.so). The path needs to be quoted with single or double quotes.
+- outputtype: the output data type, the value is the literal string of the type
+- B: the size of the intermediate buffer, in bytes; it's an optional parameter and the range is [0,512]
+
+For details about how to use the intermediate result, please refer to the example program [demo.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/demo.c).
+
+For example, the SQL statement below can be used to create a UDF from `demo.so`.
+
+```sql
+CREATE AGGREGATE FUNCTION demo AS "/home/taos/udf_example/demo.so" OUTPUTTYPE DOUBLE bufsize 14;
+```
+
+### Manage UDF
+
+- Delete UDF
+
+```
+DROP FUNCTION ids(X);
+```
+
+- ids(X): same as that in the `CREATE FUNCTION` statement
+
+```sql
+DROP FUNCTION add_one;
+```
+
+- Show Available UDF
+
+```sql
+SHOW FUNCTIONS;
+```
+
+### Use UDF
+
+The function name specified when creating a UDF can be used directly in SQL statements, just like built-in functions.
+
+```sql
+SELECT X(c) FROM table/STable;
+```
+
+The above SQL statement invokes function X for column c.
+
+## Restrictions for UDF
+
+In the current version there are some restrictions on UDF:
+
+1. Only Linux is supported when creating and invoking UDF, for both the client side and server side
+2. UDF can't be mixed with built-in functions
+3. Only one UDF can be used in a SQL statement
+4. Only a single column is supported as input for UDF
+5. Once created successfully, a UDF is persisted in the MNode of TDengine
+6. UDF can't be created through the REST interface
+7. The function name used when creating a UDF in SQL must be consistent with the function name defined in the DLL, i.e. the name defined by `udfNormalFunc`
+8. The name of a UDF should not conflict with any of TDengine's built-in functions
+
+## Examples
+
+### Scalar function example [add_one](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/add_one.c)
+
+
+add_one.c + +```c +{{#include tests/script/sh/add_one.c}} +``` + +
+ +### Aggregate function example [abs_max](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/abs_max.c) + +
+abs_max.c + +```c +{{#include tests/script/sh/abs_max.c}} +``` + +
+ +### Example for using intermediate result [demo](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/demo.c) + +
+demo.c + +```c +{{#include tests/script/sh/demo.c}} +``` + +
diff --git a/docs-en/07-develop/_category_.yml b/docs-en/07-develop/_category_.yml new file mode 100644 index 0000000000000000000000000000000000000000..6f0d66351a5c326eb2dced998e29e668d11cd1ca --- /dev/null +++ b/docs-en/07-develop/_category_.yml @@ -0,0 +1 @@ +label: Developer Guide \ No newline at end of file diff --git a/docs-en/04-develop/_sub_c.mdx b/docs-en/07-develop/_sub_c.mdx similarity index 100% rename from docs-en/04-develop/_sub_c.mdx rename to docs-en/07-develop/_sub_c.mdx diff --git a/docs-en/04-develop/_sub_cs.mdx b/docs-en/07-develop/_sub_cs.mdx similarity index 100% rename from docs-en/04-develop/_sub_cs.mdx rename to docs-en/07-develop/_sub_cs.mdx diff --git a/docs-en/04-develop/_sub_go.mdx b/docs-en/07-develop/_sub_go.mdx similarity index 100% rename from docs-en/04-develop/_sub_go.mdx rename to docs-en/07-develop/_sub_go.mdx diff --git a/docs-en/04-develop/_sub_java.mdx b/docs-en/07-develop/_sub_java.mdx similarity index 100% rename from docs-en/04-develop/_sub_java.mdx rename to docs-en/07-develop/_sub_java.mdx diff --git a/docs-en/04-develop/_sub_node.mdx b/docs-en/07-develop/_sub_node.mdx similarity index 100% rename from docs-en/04-develop/_sub_node.mdx rename to docs-en/07-develop/_sub_node.mdx diff --git a/docs-en/04-develop/_sub_python.mdx b/docs-en/07-develop/_sub_python.mdx similarity index 100% rename from docs-en/04-develop/_sub_python.mdx rename to docs-en/07-develop/_sub_python.mdx diff --git a/docs-en/04-develop/_sub_rust.mdx b/docs-en/07-develop/_sub_rust.mdx similarity index 100% rename from docs-en/04-develop/_sub_rust.mdx rename to docs-en/07-develop/_sub_rust.mdx diff --git a/docs-en/07-develop/index.md b/docs-en/07-develop/index.md new file mode 100644 index 0000000000000000000000000000000000000000..e3f55f290753f79ac1708337082ce90bb050b21f --- /dev/null +++ b/docs-en/07-develop/index.md @@ -0,0 +1,25 @@ +--- +title: Developer Guide +--- + +To develop an application to process time-series data using TDengine, we recommend taking the following steps: + +1. Choose the method to connect to TDengine. No matter what programming language you use, you can always use the REST interface to access TDengine, but you can also use connectors unique to each programming language. +2. Design the data model based on your own use cases. Learn the [concepts](/concept/) of TDengine including "one table for one data collection point" and the "super table" (STable) concept; learn about static labels, collected metrics, and subtables. Depending on the characteristics of your data and your requirements, you may decide to create one or more databases, and you should design the STable schema to fit your data. +3. Decide how you will insert data. TDengine supports writing using standard SQL, but also supports schemaless writing, so that data can be written directly without creating tables manually. +4. Based on business requirements, find out what SQL query statements need to be written. You may be able to repurpose any existing SQL. +5. If you want to run real-time analysis based on time series data, including various dashboards, it is recommended that you use the TDengine continuous query feature instead of deploying complex streaming processing systems such as Spark or Flink. +6. If your application has modules that need to consume inserted data, and they need to be notified when new data is inserted, it is recommended that you use the data subscription function provided by TDengine without the need to deploy Kafka. +7. 
In many use cases (such as fleet management), the application needs to obtain the latest status of each data collection point. It is recommended that you use the cache function of TDengine instead of deploying Redis separately.
+8. If you find that the SQL functions of TDengine cannot meet your requirements, then you can use user-defined functions to solve the problem.
+
+This section is organized in the order described above. For ease of understanding, TDengine provides sample code for each supported programming language for each function. If you want to learn more about the use of SQL, please read the [SQL manual](/taos-sql/). For a more in-depth understanding of the use of each connector, please read the [Connector Reference Guide](/reference/connector/). If you also want to integrate TDengine with third-party systems, such as Grafana, please refer to the [third-party tools](/third-party/).
+
+If you encounter any problems during the development process, please click ["Submit an issue"](https://github.com/taosdata/TDengine/issues/new/choose) at the bottom of each page and submit it on GitHub right away.
+
+```mdx-code-block
+import DocCardList from '@theme/DocCardList';
+import {useCurrentSidebarCategory} from '@docusaurus/theme-common';
+
+<DocCardList items={useCurrentSidebarCategory().items}/>
+```
diff --git a/docs-en/10-cluster/01-deploy.md b/docs-en/10-cluster/01-deploy.md
index a81ea0e316bba88b1fec7548a9d143cd44da2a14..200da1be3f8185818bd21dd3fcdc78c124a36831 100644
--- a/docs-en/10-cluster/01-deploy.md
+++ b/docs-en/10-cluster/01-deploy.md
@@ -6,29 +6,35 @@ title: Deployment

### Step 1

-The FQDN of all hosts need to be setup properly, all the FQDNs need to be configured in the /etc/hosts of each host. It must be guaranteed that each FQDN can be accessed (by ping, for example) from any other hosts.
+The FQDN of all hosts must be set up properly. For example, FQDNs may have to be configured in the /etc/hosts file on each host. You must confirm that each FQDN can be accessed from any other host; for example, you can do this by using the `ping` command.

-On each host command `hostname -f` can be executed to get the hostname. `ping` command can be executed on each host to check whether any other host is accessible from it. If any host is not accessible, the network configuration, like /etc/hosts or DNS configuration, need to be checked and revised to make any two hosts accessible to each other.
+To get the hostname on any host, the command `hostname -f` can be executed. The `ping <FQDN>` command can be executed on each host to check whether any other host is accessible from it. If any host is not accessible, the network configuration, like /etc/hosts or DNS configuration, needs to be checked and revised, to make any two hosts accessible to each other.
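+
+For example, assuming a three-node cluster with the hypothetical FQDNs and IP addresses below, each host's /etc/hosts file would contain entries like the following:
+
+```
+192.168.0.1 h1.taosdata.com
+192.168.0.2 h2.taosdata.com
+192.168.0.3 h3.taosdata.com
+```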
:::note

-- The host where the client program runs also needs to configured properly for FQDN, to make sure all hosts for client or server can be accessed from any other. In other words, the hosts where the client is running are also considered as a part of the cluster.
+- The host where the client program runs also needs to be configured properly for FQDN, to make sure all hosts for client or server can be accessed from any other. In other words, the hosts where the client is running are also considered as a part of the cluster.

-- It's suggested to disable the firewall for all hosts in the cluster. At least TCP/UDP for port 6030~6042 need to be open if firewall is enabled.
+- Please ensure that your firewall rules do not block TCP/UDP on ports 6030-6042 on all hosts in the cluster.

:::

### Step 2

-If any previous version of TDengine has been installed and configured on any host, the installation needs to be removed and the data needs to be cleaned up. For details about uninstalling please refer to [Install and Uninstall](/operation/pkg-install). To clean up the data, please use `rm -rf /var/lib/taos/\*` assuming the `dataDir` is configured as `/var/lib/taos`.
+If any previous version of TDengine has been installed and configured on any host, the installation needs to be removed and the data needs to be cleaned up. For details about uninstalling please refer to [Install and Uninstall](/operation/pkg-install). To clean up the data, please use `rm -rf /var/lib/taos/\*` assuming the `dataDir` is configured as `/var/lib/taos`.
+
+:::note
+
+As a best practice, before cleaning up any data files or directories, please ensure that your data has been backed up correctly, if required by your data integrity, backup, security, or other standard operating procedures (SOPs).
+
+:::

### Step 3

-Now it's time to install TDengine on all hosts without starting `taosd`, the versions on all hosts should be same. If it's prompted to input the existing TDengine cluster, simply press carriage return to ignore it. `install.sh -e no` can also be used to disable this prompt. For details please refer to [Install and Uninstall](/operation/pkg-install).
+Now it's time to install TDengine on all hosts, but without starting `taosd`. Note that the versions on all hosts should be the same. If you are prompted to input the existing TDengine cluster, simply press carriage return to ignore the prompt. `install.sh -e no` can also be used to disable this prompt. For details please refer to [Install and Uninstall](/operation/pkg-install).

### Step 4

-Now each physical node (referred to as `dnode` hereinafter, it's abbreviation for "data node") of TDengine need to be configured properly. Please be noted that one dnode doesn't stand for one host, multiple TDengine nodes can be started on single host as long as they are configured properly without conflicting. More specifically each instance of the configuration file `taos.cfg` stands for a dnode. Assuming the first dnode of TDengine cluster is "h1.taosdata.com:6030", its `taos.cfg` is configured as following.
+Now each physical node (referred to, hereinafter, as `dnode`, which is an abbreviation for "data node") of TDengine needs to be configured properly. Please note that one dnode doesn't stand for one host. Multiple TDengine dnodes can be started on a single host as long as they are configured properly without conflicting. More specifically, each instance of the configuration file `taos.cfg` stands for a dnode. Assuming the first dnode of the TDengine cluster is "h1.taosdata.com:6030", its `taos.cfg` is configured as follows.

```c
// firstEp is the end point to connect to when any dnode starts
@@ -44,9 +50,9 @@ serverPort 6030
#arbitrator ha.taosdata.com:6042
```

-`firstEp` and `fqdn` must be configured properly. In `taos.cfg` of all dnodes in TDengine cluster, `firstEp` must be configured to point to same address, i.e. the first dnode of the cluster. `fqdn` and `serverPort` compose the address of each node itself. If you want to start multiple TDengine dnodes on a single host, please also make sure all other configurations like `dataDir`, `logDir`, and other resources related parameters are not conflicting.
+`firstEp` and `fqdn` must be configured properly. In `taos.cfg` of all dnodes in the TDengine cluster, `firstEp` must be configured to point to the same address, i.e. the first dnode of the cluster. `fqdn` and `serverPort` compose the address of each node itself. If you want to start multiple TDengine dnodes on a single host, please make sure all other configurations like `dataDir`, `logDir`, and other resource-related parameters are not conflicting.
-For all the dnodes in a TDengine cluster, below parameters must be configured as exactly same, any node whose configuration is different from dnodes already in the cluster can't join the cluster.
+For all the dnodes in a TDengine cluster, the below parameters must be configured exactly the same; any node whose configuration is different from the dnodes already in the cluster can't join the cluster.

| **#** | **Parameter**      | **Definition**                                                                      |
| ----- | ------------------ | --------------------------------------------------------------------------------- |
@@ -61,15 +67,17 @@ For all the dnodes in a TDengine cluster, below parameters must be configured as
| 9     | maxVgroupsPerDb    | Maximum number of vgroups that can be used by each DB                               |

:::note
-Prior to version 2.0.19.0, besides the above parameters, `locale` and `charset` must be configured as same too for each dnode.
+Prior to version 2.0.19.0, besides the above parameters, `locale` and `charset` must also be configured the same for each dnode.

:::

## Start Cluster

+In the following example, we assume that the first dnode has FQDN h1.taosdata.com and the second dnode has FQDN h2.taosdata.com.
+
### Start The First DNODE

-The first dnode can be started following the instructions in [Get Started](/get-started/), for example h1.taosdata.com. Then TDengine CLI `taos` can be launched to execute command `show dnodes`, the output is as following for example:
+The first dnode can be started following the instructions in [Get Started](/get-started/). Then TDengine CLI `taos` can be launched to execute the command `show dnodes`; the output will be similar to the following example:

```
Welcome to the TDengine shell from Linux, Client Version:2.0.0.0


Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.

taos> show dnodes;
   id |       end_point      | vnodes | cores | status | role |      create_time        |
=====================================================================================
-   1 |    h1.taos.com:6030  |      0 |     2 |  ready |  any | 2020-07-31 03:49:29.202 |
+   1 | h1.taosdata.com:6030 |      0 |     2 |  ready |  any | 2020-07-31 03:49:29.202 |
Query OK, 1 row(s) in set (0.006385s)

taos>
```

-From the above output, it is shown that the end point of the started dnode is "h1.taos.com:6030", which is the `firstEp` of the cluster.
+From the above output, it is shown that the end point of the started dnode is "h1.taosdata.com:6030", which is the `firstEp` of the cluster.

### Start Other DNODEs

There are a few steps necessary to add other dnodes in the cluster.

-Firstly, start `taosd` as instructed in [Get Started](/get-started/), assuming it's for the second dnode. Before starting `taosd`, please making sure the configuration is correct, especially `firstEp`, `FQDN` and `serverPort`, `firstEp` must be same as the dnode shown in the section "Start First DNODE", i.e. "h1.taosdata.com" in this example.
+Let's assume we are starting the second dnode with FQDN h2.taosdata.com. First we make sure the configuration is correct.
+
+```c
+// firstEp is the end point to connect to when any dnode starts
+firstEp h1.taosdata.com:6030
+
+// must be configured to the FQDN of the host where the dnode is launched
+fqdn h2.taosdata.com
+
+// the port used by the dnode, default is 6030
+serverPort 6030
+
+```
+
+Second, we can start `taosd` as instructed in [Get Started](/get-started/).

-Then, on the first dnode, use TDengine CLI `taos` to execute below command to add the end point of the dnode in the cluster. In the command "fqdn:port" should be quoted using double quotes.
+Then, on the first dnode, i.e. h1.taosdata.com in our example, use TDengine CLI `taos` to execute the following command to add the end point of the dnode to the cluster. In the command, "fqdn:port" should be quoted using double quotes.

```sql
CREATE DNODE "h2.taosdata.com:6030";
```

-Then on the first dnode, execute `show dnodes` in `taos` to show whether the second dnode has been added in the cluster successfully or not.
+Then on the first dnode h1.taosdata.com, execute `show dnodes` in `taos` to show whether the second dnode has been added to the cluster successfully or not.

```sql
SHOW DNODES;
```

-If the status of the newly added dnode is offlie, please check:
+If the status of the newly added dnode is offline, please check:

- Whether the `taosd` process is running properly or not
-- In the log file `taosdlog.0` to see whether the fqdn and port are correct or not 查
+- In the log file `taosdlog.0` to see whether the fqdn and port are correct

The above process can be repeated to add more dnodes in the cluster.
diff --git a/docs-en/10-cluster/02-cluster-mgmt.md b/docs-en/10-cluster/02-cluster-mgmt.md
index c2c3caeebd3dcb70813caffc14bb52a5221f4650..674c92e2766a4eb304079140af19c8efea72d55e 100644
--- a/docs-en/10-cluster/02-cluster-mgmt.md
+++ b/docs-en/10-cluster/02-cluster-mgmt.md
@@ -3,16 +3,16 @@ sidebar_label: Operation
title: Manage DNODEs
---

-It has been introduced that how to deploy and start a cluster from scratch. Once a cluster is ready, the dnode status in the cluster can be shown at any time, new dnode can be added to scale out the cluster, an existing dnode can be removed, even load balance can be performed manually.\
+The previous section, [Deployment](/cluster/deploy), showed you how to deploy and start a cluster from scratch. Once a cluster is ready, the status of dnode(s) in the cluster can be shown at any time. Dnodes can be managed from the TDengine CLI. New dnode(s) can be added to scale out the cluster, an existing dnode can be removed, and you can even perform load balancing manually, if necessary.

:::note
-All the commands to be introduced in this chapter need to be run through TDengine CLI, sometimes it's necessary to use root privilege.
+All the commands introduced in this chapter must be run in the TDengine CLI - `taos`. Note that sometimes it is necessary to use root privilege.

:::

## Show DNODEs

-below command can be executed in TDengine CLI `taos` to list all dnodes in the cluster, including ID, end point (fqdn:port), status (ready, offline), number of vnodes, number of free vnodes, etc. It's suggested to execute this command to check after adding or removing a dnode.
+The below command can be executed in TDengine CLI `taos` to list all dnodes in the cluster, including ID, end point (fqdn:port), status (ready, offline), number of vnodes, number of free vnodes and so on. We recommend executing this command after adding or removing a dnode.
```sql SHOW DNODES; @@ -30,7 +30,7 @@ Query OK, 1 row(s) in set (0.008298s) ## Show VGROUPs -To utilize system resources efficiently and provide scalability, data sharding is required. The data of each database is divided into multiple shards and stored in multiple vnodes. These vnodes may be located in different dnodes, scaling out can be achieved by adding more vnodes from more dnodes. Each vnode can only be used for a single DB, but one DB can have multiple vnodes. The allocation of vnode is scheduled automatically by mnode according to system resources of the dnodes. +To utilize system resources efficiently and provide scalability, data sharding is required. The data of each database is divided into multiple shards and stored in multiple vnodes. These vnodes may be located on different dnodes. One way of scaling out is to add more vnodes on dnodes. Each vnode can only be used for a single DB, but one DB can have multiple vnodes. The allocation of vnode is scheduled automatically by mnode based on system resources of the dnodes. Launch TDengine CLI `taos` and execute below command: @@ -39,7 +39,7 @@ USE SOME_DATABASE; SHOW VGROUPS; ``` -The example output is as below: +The example output is below: ``` taos> show dnodes; @@ -87,7 +87,7 @@ taos> show dnodes; Query OK, 2 row(s) in set (0.001017s) ``` -It can be seen that the status of the new dnode is "offline", once the dnode is started and connects the firstEp of the cluster, execute the command again and get below example output, from which it can be seen that two dnodes are both in "ready" status. +It can be seen that the status of the new dnode is "offline". Once the dnode is started and connects to the firstEp of the cluster, you can execute the command again and get the example output below. As can be seen, both dnodes are in "ready" status. ``` taos> show dnodes; @@ -100,7 +100,7 @@ Query OK, 2 row(s) in set (0.001316s) ## Drop DNODE -Launch TDengine CLI `taos` and execute the command below to drop or remove a dndoe from the cluster. In the command, `dnodeId` can be gotten from `show dnodes`. +Launch TDengine CLI `taos` and execute the command below to drop or remove a dnode from the cluster. In the command, you can get `dnodeId` from `show dnodes`. ```sql DROP DNODE "fqdn:port"; @@ -112,7 +112,7 @@ or DROP DNODE dnodeId; ``` -The example output is as below: +The example output is below: ``` taos> show dnodes; @@ -132,14 +132,14 @@ taos> show dnodes; Query OK, 1 row(s) in set (0.001137s) ``` -In the above example, when `show dnodes` is executed the first time, two dnodes are shown. Then `drop dnode 2` is executed, after that from the output of executing `show dnodes` again it can be seen that only the dnode with ID 1 is still in the cluster. +In the above example, when `show dnodes` is executed the first time, two dnodes are shown. After `drop dnode 2` is executed, you can execute `show dnodes` again and it can be seen that only the dnode with ID 1 is still in the cluster. :::note -- Once a dnode is dropped, it can't rejoin the cluster. To rejoin, the dnode needs to deployed again after cleaning up the data directory. Normally, before dropping a dnode, the data belonging to the dnode needs to be migrated to other place. -- Please be noted that `drop dnode` is different from stopping `taosd` process. `drop dnode` just removes the dnode out of TDengine cluster. Only after a dnode is dropped, can the corresponding `taosd` process be stopped. +- Once a dnode is dropped, it can't rejoin the cluster. 
To rejoin, the dnode needs to be deployed again after cleaning up the data directory. Before dropping a dnode, the data belonging to the dnode MUST be migrated/backed up according to your data retention, data security or other SOPs.
+- Please note that `drop dnode` is different from stopping the `taosd` process. `drop dnode` just removes the dnode out of TDengine cluster. Only after a dnode is dropped, can the corresponding `taosd` process be stopped.
 - Once a dnode is dropped, other dnodes in the cluster will be notified of the drop and will not accept the request from the dropped dnode.
-- dnodeID is allocated automatically and can't be interfered manually. dnodeID is generated in ascending order without duplication.
+- dnodeID is allocated automatically and can't be manually modified. dnodeID is generated in ascending order without duplication.
 
 :::
 
@@ -155,7 +155,7 @@ ALTER DNODE <source-dnodeId> BALANCE "VNODE:<vgId>-DNODE:<dest-dnodeId>";
 
 In the above command, `source-dnodeId` is the original dnodeId where the vnode resides, `dest-dnodeId` specifies the target dnode. vgId (vgroup ID) can be shown by `SHOW VGROUPS`.
 
-Firstly `show vgroups` is executed to show the vgrup distribution.
+First `show vgroups` is executed to show the vgroup distribution.
 
 ```
 taos> show vgroups;
@@ -172,7 +172,7 @@ taos> show vgroups;
 Query OK, 8 row(s) in set (0.001314s)
 ```
 
-It can be seen that there are 5 vgroups in dnode 3 and 3 vgroups in node 1, now we want to move vgId 18 from dnode 3 to dnode 1. Execute below command in `taos`
+It can be seen that there are 5 vgroups in dnode 3 and 3 vgroups in dnode 1. Now we want to move vgId 18 from dnode 3 to dnode 1. Execute the below command in `taos`:
 
 ```
 taos> alter dnode 3 balance "vnode:18-dnode:1";
@@ -202,12 +202,12 @@ taos> show vgroups;
 Query OK, 8 row(s) in set (0.001242s)
 ```
 
-It can be seen from above output that vgId 18 has been moved from dndoe 3 to dnode 1.
+It can be seen from the above output that vgId 18 has been moved from dnode 3 to dnode 1.
 
 :::note
 
-- Manual load balancing can only be performed when the automatic load balancing is disabled, i.e. `balance` is set to 0 只.
-- Only vnode in normal state, i.e. master or slave, can be moved. vnode can't moved when its in status offline, unsynced or syncing.
+- Manual load balancing can only be performed when the automatic load balancing is disabled, i.e. `balance` is set to 0.
+- Only a vnode in normal state, i.e. master or slave, can be moved. A vnode can't be moved when it is in offline, unsynced or syncing status.
 - Before moving a vnode, it's necessary to make sure the target dnode has enough resources: CPU, memory and disk.
 
 :::
 
diff --git a/docs-en/10-cluster/03-ha-and-lb.md b/docs-en/10-cluster/03-ha-and-lb.md
index 53c95be9e995a728b2b4053e4f204df58271716e..bd718eef9f8dc181628132de831dbca2af59d158 100644
--- a/docs-en/10-cluster/03-ha-and-lb.md
+++ b/docs-en/10-cluster/03-ha-and-lb.md
@@ -7,44 +7,45 @@ title: High Availability and Load Balancing
 
 High availability of vnode and mnode can be achieved through replicas in TDengine.
 
-The number of vnodes is associated with each DB, there can be multiple DBs in a TDengine cluster. For the purpose of operation, different number of replicas can be configured properly for each DB. When creating a database, the parameter `replica` is used to specify the number of replicas, the default value is 1. With single replica, the high availability of the system can't be guaranteed. Whenever one node is down, data service would be unavailable.
The number of dnodes in the cluster must NOT be lower than the number of replicas set for any DB, otherwise the `create table` operation would fail with error "more dnodes are needed". Below SQL statement is used to create a database named as "demo" with 3 replicas.
+A TDengine cluster can have multiple databases. Each database has a number of vnodes associated with it. A different number of replicas can be configured for each DB. When creating a database, the parameter `replica` is used to specify the number of replicas. The default value for `replica` is 1. Naturally, a single replica cannot guarantee high availability, since if one node is down, the data service is unavailable. Note that the number of dnodes in the cluster must NOT be lower than the number of replicas set for any DB, otherwise the `create table` operation will fail with error "more dnodes are needed". The SQL statement below is used to create a database named "demo" with 3 replicas.
 
 ```sql
 CREATE DATABASE demo replica 3;
 ```
 
-The data in a DB is divided into multiple shards and stored in multiple vgroups. The number of vnodes in each group is determined by the number of replicas set for the DB. The vnodes in each vgroups store exactly same data. For the purpose of high availability, the vnodes in a vgroup must be located in different dnodes on different hosts. As long as over half of the vnodes in a vgroup are in online state, the vgroup is able to serve data access. Otherwise the vgroup can't handle any data access for reading or inserting data.
+The data in a DB is divided into multiple shards and stored in multiple vgroups. The number of vnodes in each vgroup is determined by the number of replicas set for the DB. The vnodes in each vgroup store exactly the same data. For the purpose of high availability, the vnodes in a vgroup must be located in different dnodes on different hosts. As long as over half of the vnodes in a vgroup are in an online state, the vgroup is able to provide data access. Otherwise the vgroup can't provide data access for reading or inserting data.
 
-There may be data for multiple DBs in a dnode. Once a dnode is down, multiple DBs may be affected. However, it's hard to say the cluster is guaranteed to work properly as long as over half of dnodes are online because vnodes are introduced and there may be complex mapping between vnodes and dnodes.
+There may be data for multiple DBs in a dnode. When a dnode is down, multiple DBs may be affected. While in theory the cluster will provide data access for reading or inserting data if over half the vnodes in vgroups are online, because of the possibly complex mapping between vnodes and dnodes, it is difficult to guarantee that the cluster will work properly if over half of the dnodes are online.
 
 ## High Availability of Mnode
 
-Each TDengine cluster is managed by `mnode`, which is a module of `taosd`. For the high availability of mnode, multiple mnodes can be configured using system parameter `numOfMNodes`, the valid time range is [1,3]. To make sure the data consistency between mnodes, the data replication between mnodes is performed in synchronous way.
+Each TDengine cluster is managed by `mnode`, which is a module of `taosd`. For the high availability of mnode, multiple mnodes can be configured using the system parameter `numOfMnodes`. The valid range for `numOfMnodes` is [1,3]. To ensure data consistency between mnodes, data replication between mnodes is performed synchronously.
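+
+For illustration, below is a minimal sketch of how this parameter might look in `taos.cfg` (hedged example: the parameter name and valid range follow this section, while the value 2 is purely illustrative):
+
+```c
+// number of mnodes in the cluster, valid range is [1,3]
+numOfMnodes 2
+```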
-There may be multiple dnodes in a cluster, but only one mnode can be started in each dnode. Which one or ones of the dnodes will be designated as mnodes is automatically determined by TDengine according to the cluster configuration and system resources. Command `show mnodes` can be executed in TDengine `taos` to show the mnodes in the cluster.
+There may be multiple dnodes in a cluster, but only one mnode can be started in each dnode. Which one or ones of the dnodes will be designated as mnodes is automatically determined by TDengine according to the cluster configuration and system resources. The command `show mnodes` can be executed in TDengine `taos` to show the mnodes in the cluster.
 
 ```sql
 SHOW MNODES;
 ```
 
-The end point and role/status (master, slave, unsynced, or offline) of all mnodes can be shown by the above command. When the first dnode is started in a cluster, there must be one mnode in this dnode, because there must be at least one mnode otherwise the cluster doesn't work. If `numOfMNodes` is configured to 2, another mnode will be started when the second dnode is launched.
+The end point and role/status (master, slave, unsynced, or offline) of all mnodes can be shown by the above command. When the first dnode is started in a cluster, there must be one mnode in this dnode. Without at least one mnode, the cluster cannot work. If `numOfMnodes` is configured to 2, another mnode will be started when the second dnode is launched.
 
 For the high availability of mnode, `numOfMnodes` needs to be configured to 2 or a higher value. Because the data consistency between mnodes must be guaranteed, the replica confirmation parameter `quorum` is set to 2 automatically if `numOfMNodes` is set to 2 or higher.
 
 :::note
-If high availability is important for your system, both vnode and mnode must be configured to have multiple replicas. How to configure for them are different and have been described.
+If high availability is important for your system, both vnode and mnode must be configured to have multiple replicas.
 :::
 
-## Load Balance
+## Load Balancing
 
-Load balance will be triggered in 3 cades without manual intervention.
+Load balancing will be triggered in 3 cases without manual intervention.
 
-- When a new dnode is joined in the cluster, automatic load balancing may be triggered, some data from some dnodes may be transferred to the new dnode automatically.
+- When a new dnode joins the cluster, automatic load balancing may be triggered. Some data from other dnodes may be transferred to the new dnode automatically.
 - When a dnode is removed from the cluster, the data from this dnode will be transferred to other dnodes automatically.
 - When a dnode is too hot, i.e. too much data has been stored in it, automatic load balancing may be triggered to migrate some vnodes from this dnode to other dnodes.
-- :::tip
-  Automatic load balancing is controlled by parameter `balance`, 0 means disabled and 1 means enabled.
+
+:::tip
+Automatic load balancing is controlled by the parameter `balance`: 0 means disabled and 1 means enabled. This is set in the file [taos.cfg](https://docs.tdengine.com/reference/config/#balance).
 :::
 
@@ -52,26 +53,26 @@ Load balance will be triggered in 3 cades without manual intervention.
 
 When a dnode is offline, it can be detected by the TDengine cluster. There are two cases:
 
-- The dnode becomes online again before the threshold configured in `offlineThreshold` is reached, it is still in the cluster and data replication is started automatically.
The dnode can work properly after the data syncup is finished.
+- The dnode comes online before the threshold configured in `offlineThreshold` is reached. The dnode is still in the cluster and data replication is started automatically. The dnode can work properly after the data sync is finished.
 
-- If the dnode has been offline over the threshold configured in `offlineThreshold` in `taos.cfg`, the dnode will be removed from the cluster automatically. System alert will be generated and automatic load balancing will be triggered too if `balance` is set to 1. When the removed dnode is restarted and becomes online, it will not be joined in the cluster automatically, it can only be joined manually by the system operator.
+- If the dnode has been offline over the threshold configured in `offlineThreshold` in `taos.cfg`, the dnode will be removed from the cluster automatically. A system alert will be generated and automatic load balancing will be triggered if `balance` is set to 1. When the removed dnode is restarted and becomes online, it will not join the cluster automatically. The system administrator has to manually add the dnode back to the cluster.
 
 :::note
-If all the vnodes in a vgroup (or mnodes in mnode group) are in offline or unsynced status, the master node can only be voted after all the vnodes or mnodes in the group become online and can exchange status, then the vgroup (or mnode group) is able to provide service.
+If all the vnodes in a vgroup (or mnodes in mnode group) are in offline or unsynced status, the master node can only be elected after all the vnodes or mnodes in the group become online and can exchange status information. Following this, the vgroup (or mnode group) is able to provide service.
 :::
 
 ## Arbitrator
 
-If the number of replicas is set to an even number like 2, when half of the vnodes in a vgroup don't work master node can't be voted. Similar case is also applicable to mnode if the number of mnodes is set to an even number like 2.
+The "arbitrator" component is used to address the special case when the number of replicas is set to an even number like 2, 4, etc. If half of the vnodes in a vgroup don't work, it is impossible to vote and select a master node. This situation also applies to mnodes if the number of mnodes is set to an even number like 2, 4, etc.
 
-To resolve this problem, a new arbitrator component named `tarbitrator`, abbreviated for TDengine Arbitrator, was introduced. Arbitrator simulates a vnode or mnode but it's only responsible for network communication and doesn't handle any actual data access. With Arbitrator, any vgroup or mnode group can be considered as having number of member nodes and master node can be selected.
+To resolve this problem, a new arbitrator component named `tarbitrator`, an abbreviation of TDengine Arbitrator, was introduced. The `tarbitrator` simulates a vnode or mnode but is only responsible for network communication and doesn't handle any actual data access. As long as more than half of the vnodes or mnodes, including the Arbitrator, are available, the vnode group or mnode group can provide data insertion or query services normally.
 
-Normally, it's suggested to configure replica number of each DB or system parameter `numOfMNodes` to an odd number. However, if a user is very sensitive to storage space, replica number of 2 plus arbitrator component can be used to achieve both lower cost of storage space and high availability.
+Normally, it's prudent to configure the replica number for each DB or system parameter `numOfMnodes` to be an odd number.
However, if a user is very sensitive to storage space, a replica number of 2 plus the arbitrator component can be used to achieve both lower cost of storage space and high availability.
 
 Arbitrator component is installed with the server package. For details about how to install, please refer to [Install](/operation/pkg-install). The `-p` parameter of `tarbitrator` can be used to specify the port on which it provides service.
 
-In the configuration file `taos.cfg` of each dnode, parameter `arbitrator` needs to be configured to the end point of the `tarbitrator` process. arbitrator component will be used automatically if the replica is configured to an even number and will be ignored if the replica is configured to an odd number.
+In the configuration file `taos.cfg` of each dnode, parameter `arbitrator` needs to be configured to the end point of the `tarbitrator` process. The arbitrator component will be used automatically if the replica is configured to an even number and will be ignored if the replica is configured to an odd number.
 
 Arbitrator can be shown by executing command in TDengine CLI `taos` with its role shown as "arb".
 
diff --git a/docs-en/10-cluster/index.md b/docs-en/10-cluster/index.md
index 7f8d87965754c1f529d806ad59f54ea935fe31ce..5a45a2ce7b08c67322265cf1bbd54ef66cbfc027 100644
--- a/docs-en/10-cluster/index.md
+++ b/docs-en/10-cluster/index.md
@@ -3,11 +3,13 @@ title: Cluster
 keywords: ["cluster", "high availability", "load balance", "scale out"]
 ---
 
-TDengine can be deployed in cluster mode to increase the processing capability and high availability. In cluster mode, any data can have multiple replications for the purpose of high availability and load balance. TDengine cluster can be scaled out easily to support more data collecting points and more data.
+TDengine has a native distributed design and provides the ability to scale out. A few nodes can form a TDengine cluster. If you need higher processing power, you just need to add more nodes into the cluster. TDengine uses virtual node technology to virtualize a node into multiple virtual nodes to achieve load balancing. At the same time, TDengine can group virtual nodes on different nodes into virtual node groups, and use the replication mechanism to ensure the high availability of the system. The cluster feature of TDengine is completely open source.
+
+This chapter mainly introduces cluster deployment, maintenance, and how to achieve high availability and load balancing.
 
 ```mdx-code-block
 import DocCardList from '@theme/DocCardList';
 import {useCurrentSidebarCategory} from '@docusaurus/theme-common';
-```
\ No newline at end of file
+```
 
diff --git a/docs-en/12-taos-sql/01-data-type.md b/docs-en/12-taos-sql/01-data-type.md
index 77e95109c7dff1b01b9fd0680fab0fb123445413..3f5a49e3135771c6c1e62bcf158a99ee30f1ed9d 100644
--- a/docs-en/12-taos-sql/01-data-type.md
+++ b/docs-en/12-taos-sql/01-data-type.md
@@ -1,37 +1,37 @@
 ---
 title: Data Types
-description: "The data types supported by TDengine include timestamp, float, JSON, etc"
+description: "TDengine supports a variety of data types including timestamp, float, JSON and many others."
 ---
 
-When using TDengine to store and query data, the most important part of the data is timestamp. Timestamp must be specified when creating and inserting data rows or querying data, timestamp must follow below rules:
+When using TDengine to store and query data, the most important part of the data is the timestamp. A timestamp must be specified when creating and inserting data rows.
The timestamp must follow the rules below:
 
-- the format must be `YYYY-MM-DD HH:mm:ss.MS`, the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128`
-- internal function `now` can be used to get the current timestamp of the client side
-- the current timestamp of the client side is applied when `now` is used to insert data
+- The format must be `YYYY-MM-DD HH:mm:ss.MS`, the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128`
+- Internal function `now` can be used to get the current timestamp on the client side
+- The current timestamp of the client side is applied when `now` is used to insert data
 - Epoch Time:timestamp can also be a long integer number, which means the number of seconds, milliseconds or nanoseconds, depending on the time precision, from 1970-01-01 00:00:00.000 (UTC/GMT)
-- timestamp can be applied with add/substract operation, for example `now-2h` means 2 hours back from the time at which query is executed,the unit can be b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), w(week.。 So `select * from t1 where ts > now-2w and ts <= now-1w` means the data between two weeks ago and one week ago. The time unit can also be n (calendar month) or y (calendar year) when specifying the time window for down sampling operation.
+- Add/subtract operations can be carried out on timestamps. For example `now-2h` means 2 hours prior to the time at which the query is executed. The units of time in operations can be b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). So `select * from t1 where ts > now-2w and ts <= now-1w` means the data between two weeks ago and one week ago. The time unit can also be n (calendar month) or y (calendar year) when specifying the time window for down sampling operations.
 
-Time precision in TDengine can be set by the `PRECISION` parameter when executing `CREATE DATABASE`, like below, the default time precision is millisecond.
+Time precision in TDengine can be set by the `PRECISION` parameter when executing `CREATE DATABASE`. The default time precision is millisecond. In the statement below, the precision is set to nanoseconds.
 
 ```sql
 CREATE DATABASE db_name PRECISION 'ns';
 ```
 
-In TDengine, below data types can be used when specifying a column or tag.
+In TDengine, the data types below can be used when specifying a column or tag.
 
-| # | **类型** | **Bytes** | **说明** |
+| # | **type** | **Bytes** | **Description** |
 | --- | :-------: | --------- | ------------------------- |
 | 1 | TIMESTAMP | 8 | Default precision is millisecond, microsecond and nanosecond are also supported |
 | 2 | INT | 4 | Integer, the value range is [-2^31+1, 2^31-1], while -2^31 is treated as NULL |
 | 3 | BIGINT | 8 | Long integer, the value range is [-2^63+1, 2^63-1], while -2^63 is treated as NULL |
 | 4 | FLOAT | 4 | Floating point number, the effective number of digits is 6-7, the value range is [-3.4E38, 3.4E38] |
-| 5 | DOUBLE | 8 | double precision floating point number, the effective number of digits is 15-16, the value range is [-1.7E308, 1.7E308] |
+| 5 | DOUBLE | 8 | Double precision floating point number, the effective number of digits is 15-16, the value range is [-1.7E308, 1.7E308] |
 | 6 | BINARY | User Defined | Single-byte string for ASCII visible characters. Length must be specified when defining a column or tag of binary type. The string length can be up to 16374 bytes. The string value must be quoted with single quotes.
The literal single quote inside the string must be preceded with back slash like `\'` |
 | 7 | SMALLINT | 2 | Short integer, the value range is [-32767, 32767], while -32768 is treated as NULL |
-| 8 | TINYINT | 1 | Single-byte integer, the value range is [-127, 127], while -128 is treated as NLLL |
+| 8 | TINYINT | 1 | Single-byte integer, the value range is [-127, 127], while -128 is treated as NULL |
 | 9 | BOOL | 1 | Bool, the value range is {true, false} |
-| 10 | NCHAR | User Defined| Multiple-Byte string that can include like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\’`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. Error will be reported the string value exceeds the length defined. |
-| 11 | JSON | | json type can only be used on tag, a tag of json type is excluded with any other tags of any other type |
+| 10 | NCHAR | User Defined| Multi-byte string that can include multi-byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\’`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. |
+| 11 | JSON | | JSON type can only be used on tags. A tag of JSON type is mutually exclusive with tags of any other type |
 
 :::tip
 TDengine is case insensitive and treats any characters in the sql command as lower case by default, case sensitive strings must be quoted with single quotes.
 
 :::
 
 :::note
-Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multiple-byte characters must be stored in NCHAR type.
+Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multi-byte characters must be stored in NCHAR type.
 
 :::
 
diff --git a/docs-en/12-taos-sql/02-database.md b/docs-en/12-taos-sql/02-database.md
index 12e2edf8bae21059e8c2d5c18858d502c834e9c1..80581b2f1bc7ce9cd046c18873d3f22b6804d8cf 100644
--- a/docs-en/12-taos-sql/02-database.md
+++ b/docs-en/12-taos-sql/02-database.md
@@ -4,7 +4,7 @@ title: Database
 description: "create and drop database, show or change database parameters"
 ---
 
-## Create Datable
+## Create Database
 
 ```
 CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1];
@@ -12,11 +12,11 @@ CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1];
 
 :::info
 
-1. KEEP specifies the number of days for which the data in the database to be created will be kept, the default value is 3650 days, i.e. 10 years. The data will be deleted automatically once its age exceeds this threshold.
+1. KEEP specifies the number of days for which the data in the database will be retained. The default value is 3650 days, i.e. 10 years. The data will be deleted automatically once its age exceeds this threshold.
 2. UPDATE specifies whether the data can be updated and how the data can be updated.
-   1.
UPDATE set to 0 means update operation is not allowed, the data with an existing timestamp will be dropped silently.
-   2. UPDATE set to 1 means the whole row will be updated, the columns for which no value is specified will be set to NULL
-   3. UPDATE set to 2 means updating a part of columns for a row is allowed, the columns for which no value is specified will be kept as no change
+   1. UPDATE set to 0 means update operation is not allowed. The update for data with an existing timestamp will be discarded silently and the original record in the database will be preserved as is.
+   2. UPDATE set to 1 means the whole row will be updated. The columns for which no value is specified will be set to NULL.
+   3. UPDATE set to 2 means updating a subset of columns for a row is allowed. The columns for which no value is specified will be kept unchanged.
 3. The maximum length of database name is 33 bytes.
 4. The maximum length of a SQL statement is 65,480 bytes.
 5. Below are the parameters that can be used when creating a database
@@ -34,8 +34,8 @@ CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1];
   - quorum: [Description](/reference/config/#quorum)
   - maxVgroupsPerDb: [Description](/reference/config/#maxvgroupsperdb)
   - comp: [Description](/reference/config/#comp)
-  - precision: [Description](reference/config/#precision)
-6. Please be noted that all of the parameters mentioned in this section can be configured in configuration file `taosd.cfg` at server side and used by default, can be override if they are specified in `create database` statement.
+  - precision: [Description](/reference/config/#precision)
+6. Please note that all of the parameters mentioned in this section are configured in the configuration file `taos.cfg` on the TDengine server. If not specified in the `create database` statement, the values from taos.cfg are used by default. To override the defaults, the parameters must be specified in the `create database` statement.
 
 :::
 
@@ -52,7 +52,7 @@ USE db_name;
 ```
 
 :::note
-This way is not applicable when using a REST connection
+This way is not applicable when using a REST connection. In a REST connection the database name must be specified before a table or STable name. For example, to query the STable "meters" in database "test", the query would be "SELECT count(*) FROM test.meters".
 
 :::
 
@@ -63,13 +63,13 @@ DROP DATABASE [IF EXISTS] db_name;
 ```
 
 :::note
-All data in the database will be deleted too. This command must be used with caution.
+All data in the database will be deleted too. This command must be used with extreme caution. Please follow your organization's data integrity, data backup, data security or any other applicable SOPs before using this command.
 
 :::
 
 ## Change Database Configuration
 
-Some examples are shown below to demonstrate how to change the configuration of a database. Please be noted that some configuration parameters can be changed after the database is created, but some others can't, for details of the configuration parameters of database please refer to [Configuration Parameters](/reference/config/).
+Some examples are shown below to demonstrate how to change the configuration of a database. Please note that some configuration parameters can be changed after the database is created, but some cannot. For details of the configuration parameters of a database, please refer to [Configuration Parameters](/reference/config/).
```
ALTER DATABASE db_name COMP 2;
@@ -81,7 +81,7 @@ COMP parameter specifies whether the data is compressed and how the data is comp
 ALTER DATABASE db_name REPLICA 2;
 ```
 
-REPLICA parameter specifies the number of replications of the database.
+REPLICA parameter specifies the number of replicas of the database.
 
 ```
 ALTER DATABASE db_name KEEP 365;
@@ -124,4 +124,4 @@ SHOW DATABASES;
 SHOW CREATE DATABASE db_name;
 ```
 
-This command is useful when migrating the data from one TDengine cluster to another one. Firstly this command can be used to get the CREATE statement, which in turn can be used in another TDengine to create an exactly same database.
+This command is useful when migrating the data from one TDengine cluster to another. It can be used to get the CREATE statement, which can then be used in another TDengine instance to create the exact same database.
 
diff --git a/docs-en/12-taos-sql/03-table.md b/docs-en/12-taos-sql/03-table.md
index 3ec429f9dfe72e59d28df0581d8f118f324e8771..f065a8e2396583bb7a512446b513ed60056ad55e 100644
--- a/docs-en/12-taos-sql/03-table.md
+++ b/docs-en/12-taos-sql/03-table.md
@@ -12,12 +12,12 @@ CREATE TABLE [IF NOT EXISTS] tb_name (timestamp_field_name TIMESTAMP, field1_nam
 
 :::info
 
-1. The first column of a table must be in TIMESTAMP type, and it will be set as primary key automatically
-2. The maximum length of table name is 192 bytes.
-3. The maximum length of each row is 16k bytes, please be notes that the extra 2 bytes used by each BINARY/NCHAR column are also counted in.
-4. The name of sub-table can only be consisted of English characters, digits and underscore, and can't be started with digit. Table names are case insensitive.
-5. The maximum length in bytes must be specified when using BINARY or NCHAR type.
-6. Escape character "\`" can be used to avoid the conflict between table names and reserved keywords, above rules will be bypassed when using escape character on table names, but the upper limit for name length is still valid. The table names specified using escape character are case sensitive. Only ASCII visible characters can be used with escape character.
+1. The first column of a table MUST be of type TIMESTAMP. It is automatically set as the primary key.
+2. The maximum length of the table name is 192 bytes.
+3. The maximum length of each row is 48k bytes. Please note that the extra 2 bytes used by each BINARY/NCHAR column are also counted.
+4. The name of the subtable can only consist of characters from the English alphabet, digits and underscore. Table names can't start with a digit. Table names are case insensitive.
+5. The maximum length in bytes must be specified when using BINARY or NCHAR types.
+6. Escape character "\`" can be used to avoid the conflict between table names and reserved keywords, above rules will be bypassed when using escape character on table names, but the upper limit for the name length is still valid. The table names specified using escape character are case sensitive. Only ASCII visible characters can be used with escape character. For example \`aBc\` and \`abc\` are different table names but `abc` and `aBc` are the same table name because they are both converted to `abc` internally.
 
 :::
 
@@ -28,9 +28,9 @@ CREATE TABLE [IF NOT EXISTS] tb_name (timestamp_field_name TIMESTAMP, field1_nam
 CREATE TABLE [IF NOT EXISTS] tb_name USING stb_name TAGS (tag_value1, ...);
 ```
 
-The above command creates a subtable using the specified super table as template and the specified tab values.
+The above command creates a subtable using the specified super table as a template and the specified tag values.
 
-### Create Subtable Using STable As Template With A Part of Tags
+### Create Subtable Using STable As Template With A Subset of Tags
 
 ```
 CREATE TABLE [IF NOT EXISTS] tb_name USING stb_name (tag_name1, ...) TAGS (tag_value1, ...);
@@ -44,11 +44,11 @@ The tags for which no value is specified will be set to NULL.
 CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) [IF NOT EXISTS] tb_name2 USING stb_name TAGS (tag_value2, ...) ...;
 ```
 
-This way can be used to create a lot of tables in a single SQL statement to accelerate the speed of the creating tables.
+This can be used to create a large number of tables in a single SQL statement, which makes table creation much faster.
 
 :::info
 
-- Creating tables in batch must use super table as template.
+- Creating tables in batch must use a super table as a template.
 - The length of single statement is suggested to be between 1,000 and 3,000 bytes for best performance.
 
 :::
 
@@ -62,7 +62,7 @@ DROP TABLE [IF EXISTS] tb_name;
 ## Show All Tables In Current Database
 
 ```
-SHOW TABLES [LIKE tb_name_wildcar];
+SHOW TABLES [LIKE tb_name_wildcard];
 ```
 
 ## Show Create Statement of A Table
 
@@ -71,7 +71,7 @@ SHOW TABLES [LIKE tb_name_wildcar];
 SHOW CREATE TABLE tb_name;
 ```
 
-This way is useful when migrating the data in one TDengine cluster to another one because it can be used to create exactly same tables in the target database.
+This is useful when migrating the data in one TDengine cluster to another because it can be used to create the exact same tables in the target database.
 
 ## Show Table Definition
 
@@ -90,7 +90,7 @@ ALTER TABLE tb_name ADD COLUMN field_name data_type;
 
 :::info
 
 1. The maximum number of columns is 4096, the minimum number of columns is 2.
-2. The maximum length of column name is 64 bytes.
+2. The maximum length of a column name is 64 bytes.
 
 :::
 
@@ -101,7 +101,7 @@ ALTER TABLE tb_name DROP COLUMN field_name;
 ```
 
 :::note
-If a table is created using a super table as template, the table definition can only be changed on the corresponding super table, but the change will be automatically applied to all the sub tables created using this super table as template. For tables created in normal way, the table definition can be changed directly on the table.
+If a table is created using a super table as template, the table definition can only be changed on the corresponding super table, and the change will be automatically applied to all the subtables created using this super table as template. For tables created in the normal way, the table definition can be changed directly on the table.
 
 :::
 
@@ -111,10 +111,10 @@ If a table is created using a super table as template, the table definition can
 ALTER TABLE tb_name MODIFY COLUMN field_name data_type(length);
 ```
 
-The the type of a column is variable length, like BINARY or NCHAR, this way can be used to change (or increase) the length of the column.
+If the type of a column is variable length, like BINARY or NCHAR, this command can be used to change the length of the column.
 
 :::note
-If a table is created using a super table as template, the table definition can only be changed on the corresponding super table, but the change will be automatically applied to all the sub tables created using this super table as template. For tables created in normal way, the table definition can be changed directly on the table.
+If a table is created using a super table as template, the table definition can only be changed on the corresponding super table, and the change will be automatically applied to all the subtables created using this super table as template. For tables created in the normal way, the table definition can be changed directly on the table.
 
 :::
 
diff --git a/docs-en/12-taos-sql/04-stable.md b/docs-en/12-taos-sql/04-stable.md
index 8d763ac22f0c64ff898036653c1fd58c6df00298..b8a608792ab327a81129d29ddd0ff44d7af6e6c5 100644
--- a/docs-en/12-taos-sql/04-stable.md
+++ b/docs-en/12-taos-sql/04-stable.md
@@ -9,20 +9,20 @@ Keyword `STable`, abbreviated for super table, is supported since version 2.0.15
 
 :::
 
-## Crate STable
+## Create STable
 
 ```
 CREATE STable [IF NOT EXISTS] stb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]) TAGS (tag1_name tag_type1, tag2_name tag_type2 [, tag3_name tag_type3]);
 ```
 
-The SQL statement of creating STable is similar to that of creating table, but a special column named as `TAGS` must be specified with the names and types of the tags.
+The SQL statement of creating a STable is similar to that of creating a table, but a special column set named `TAGS` must be specified with the names and types of the tags.
 
 :::info
 
-1. The tag types specified in TAGS should NOT be timestamp. Since 2.1.3.0 timestamp type can be used in TAGS column, but its value must be fixed and arithmetic operation can't be applied on it.
-2. The tag names specified in TAGS should NOT be same as other columns.
-3. The tag names specified in TAGS should NOT be same as any reserved keywords.(Please refer to [keywords](/taos-sql/keywords/)
-4. The maximum number of tags specified in TAGS is 128, but there must be at least one tag, and the total length of all tag columns should NOT exceed 16KB.
+1. A tag can be of type timestamp, since version 2.1.3.0, but its value must be fixed and arithmetic operations cannot be performed on it. Prior to version 2.1.3.0, tag types specified in TAGS could not be of type timestamp.
+2. The tag names specified in TAGS should NOT be the same as other columns.
+3. The tag names specified in TAGS should NOT be the same as any reserved keywords. (Please refer to [keywords](/taos-sql/keywords/).)
+4. The maximum number of tags specified in TAGS is 128, there must be at least one tag, and the total length of all tag columns should NOT exceed 16KB.
 
 :::
 
@@ -32,7 +32,7 @@ The SQL statement of creating STable is similar to that of creating table, but a
 DROP STable [IF EXISTS] stb_name;
 ```
 
-All the sub-tables created using the deleted STable will be deleted automatically.
+All the subtables created using the deleted STable will be deleted automatically.
 
 ## Show All STables
 
@@ -40,7 +40,7 @@ All the sub-tables created using the deleted STable will be deleted automaticall
 SHOW STableS [LIKE tb_name_wildcard];
 ```
 
-This command can be used to display the information of all STables in the current database, including name, creation time, number of columns, number of tags, number of tables created using this STable.
+This command can be used to display the information of all STables in the current database, including name, creation time, number of columns, number of tags, and number of tables created using this STable.
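+
+For example, a hypothetical invocation that lists only the STables whose names start with "met" (the STable name prefix is made up; the wildcard pattern follows the `[LIKE tb_name_wildcard]` clause shown above):
+
+```
+SHOW STABLES LIKE 'met%';
+```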
## Show The Create Statement of A STable
 
@@ -48,7 +48,7 @@ This command can be used to display the information of all STables in the curren
 SHOW CREATE STable stb_name;
 ```
 
-This command is useful in migrating data from one TDengine cluster to another one because it can be used to create an exactly same STable in the target database.
+This command is useful in migrating data from one TDengine cluster to another because it can be used to create the exact same STable in the target database.
 
 ## Get STable Definition
 
@@ -76,7 +76,7 @@ ALTER STable stb_name DROP COLUMN field_name;
 ALTER STable stb_name MODIFY COLUMN field_name data_type(length);
 ```
 
-This command can be used to change (or incerase, more specifically) the length of a column of variable length types, like BINARY or NCHAR.
+This command can be used to change (or more specifically, increase) the length of a column of variable length types, like BINARY or NCHAR.
 
 ## Change Tags of A STable
 
@@ -94,7 +94,7 @@ This command is used to add a new tag for a STable and specify the tag type.
 ALTER STable stb_name DROP TAG tag_name;
 ```
 
-The tag will be removed automatically from all the sub tables crated using the super table as template once a tag is removed from a super table.
+The tag will be removed automatically from all the subtables created using the super table as template once the tag is removed from the super table.
 
 ### Change A Tag
 
@@ -102,7 +102,7 @@ The tag will be removed automatically from all the sub tables crated using the s
 ALTER STable stb_name CHANGE TAG old_tag_name new_tag_name;
 ```
 
-The tag name will be changed automatically from all the sub tables crated using the super table as template once a tag name is changed for a super table.
+The tag name will be changed automatically for all the subtables created using the super table as template once the tag name is changed for the super table.
 
 ### Change Tag Length
 
@@ -110,9 +110,9 @@ The tag name will be changed automatically from all the sub tables crated using
 ALTER STable stb_name MODIFY TAG tag_name data_type(length);
 ```
 
-This command can be used to change (or incerase, more specifically) the length of a tag of variable length types, like BINARY or NCHAR.
+This command can be used to change (or more specifically, increase) the length of a tag of variable length types, like BINARY or NCHAR.
 
 :::note
-Changing tag value can be applied to only sub tables. All other tag operations, like add tag, remove tag, however, can be applied to only STable. If a new tag is added for a STable, the tag will be added with NULL value for all its sub tables.
+Changing tag values can be applied only to subtables. All other tag operations, however, such as adding or removing a tag, can be applied only to the STable. If a new tag is added for a STable, the tag will be added with NULL value for all its subtables.
 
 :::
 
diff --git a/docs-en/12-taos-sql/05-insert.md b/docs-en/12-taos-sql/05-insert.md
index 47900a44fde85574d7c7cf361c8eaa08c72b1ad2..1336cd7238a19190583ea9d268a64df242ffd3c9 100644
--- a/docs-en/12-taos-sql/05-insert.md
+++ b/docs-en/12-taos-sql/05-insert.md
@@ -19,15 +19,15 @@ INSERT INTO
 
 ## Insert Single or Multiple Rows
 
-Single row or multiple rows specified with VALUES can be inserted into a specific table. For example
+Single row or multiple rows specified with VALUES can be inserted into a specific table. For example:
 
-Single row is inserted using below statement.
+A single row is inserted using the below statement.
```sql
INSERT INTO d1001 VALUES (NOW, 10.2, 219, 0.32);
 ```
 
-Double rows can be inserted using below statement.
+Two rows are inserted using the below statement.
 
 ```sql
 INSERT INTO d1001 VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32) (1626164208000, 10.15, 217, 0.33);
@@ -36,7 +36,7 @@ INSERT INTO d1001 VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32) (162616420
 
 :::note
 
 1. In the second example above, different formats are used in the two rows to be inserted. In the first row, the timestamp format is a date and time string, which is interpreted from the string value only. In the second row, the timestamp format is a long integer, which will be interpreted based on the database time precision.
-2. When trying to insert multiple rows in single statement, only the timestamp of one row can be set as NOW, otherwise there will be duplicate timestamps among the rows and the result may be out of expectation because NOW will be interpreted as the time when the statement is executed.
+2. When trying to insert multiple rows in a single statement, only the timestamp of one row can be set as NOW, otherwise there will be duplicate timestamps among the rows and the result may be out of expectation because NOW will be interpreted as the time when the statement is executed.
 3. The oldest timestamp that is allowed is subtracting the KEEP parameter from current time.
 4. The newest timestamp that is allowed is adding the DAYS parameter to current time.
 
@@ -51,13 +51,13 @@ INSERT INTO d1001 (ts, current, phase) VALUES ('2021-07-13 14:06:33.196', 10.27,
 ```
 
 :::info
-If no columns are explicitly specified, all the columns must be provided with values, this is called "all column mode". The insert performance of all column mode is much better than specifying a part of columns, so it's encouraged to use "all column mode" while providing NULL value explicitly for the columns for which no actual value can be provided.
+If no columns are explicitly specified, all the columns must be provided with values; this is called "all column mode". The insert performance of all column mode is much better than specifying a subset of columns, so it's encouraged to use "all column mode" while providing NULL value explicitly for the columns for which no actual value can be provided.
 :::
 
 ## Insert Into Multiple Tables
 
-One or multiple rows can be inserted into multiple tables in single SQL statement, with or without specifying specific columns.
+One or multiple rows can be inserted into multiple tables in a single SQL statement, with or without specifying specific columns.
 
 ```sql
 INSERT INTO d1001 VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33)
@@ -66,40 +66,40 @@ INSERT INTO d1001 VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-
 
 ## Automatically Create Table When Inserting
 
-If it's not sure whether the table already exists, the table can be created automatically while inserting using below SQL statement. To use this functionality, a STable must be used as template and tag values must be provided.
+If it's unknown whether the table already exists, the table can be created automatically while inserting using the SQL statement below. To use this functionality, a STable must be used as template and tag values must be provided.
```sql
-INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32);
+INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32);
 ```
 
-It's not necessary to provide values for all tag when creating tables automatically, the tags without values provided will be set to NULL.
+It's not necessary to provide values for all tags when creating tables automatically; the tags without values provided will be set to NULL.
 
 ```sql
 INSERT INTO d21001 USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:33.196', 10.15, 217, 0.33);
 ```
 
-Multiple rows can also be inserted into same table in single SQL statement using this way.自
+Multiple rows can also be inserted into the same table in a single SQL statement.
 
 ```sql
-INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33)
+INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33)
             d21002 USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:34.255', 10.15, 217, 0.33)
             d21003 USING meters (groupId) TAGS (2) (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31);
 ```
 
 :::info
-Prior to version 2.0.20.5, when using `INSERT` to create table automatically and specify the columns, the column names must follow the table name immediately. From version 2.0.20.5, the column names can follow the table name immediately, also can be put between `TAGS` and `VALUES`. In same SQL statement, however, these two ways of specifying column names can't be mixed.
+Prior to version 2.0.20.5, when using `INSERT` to create tables automatically and specifying the columns, the column names must follow the table name immediately. From version 2.0.20.5, the column names can follow the table name immediately, or can be put between `TAGS` and `VALUES`. In the same SQL statement, however, these two ways of specifying column names can't be mixed.
 :::
 
 ## Insert Rows From A File
 
-Besides using `VALUES` to insert one or multiple rows, the data to be inserted can also be prepared in a CSV file with comma as separator and each field value quoted by single quotes. Table definition is not required in the CSV file. For example, if file "/tmp/csvfile.csv" contains below data:
+Besides using `VALUES` to insert one or multiple rows, the data to be inserted can also be prepared in a CSV file with comma as separator and each field value quoted by single quotes. Table definition is not required in the CSV file.
For example, if file "/tmp/csvfile.csv" contains the below data:
 
 ```
 '2021-07-13 14:07:34.630', '10.2', '219', '0.32'
 '2021-07-13 14:07:35.779', '10.15', '217', '0.33'
 ```
 
-Then data in this file can be inserted by below SQL statement:
+Then data in this file can be inserted by the SQL statement below:
 
 ```sql
 INSERT INTO d1001 FILE '/tmp/csvfile.csv';
@@ -107,30 +107,30 @@ INSERT INTO d1001 FILE '/tmp/csvfile.csv';
 
 ## Create Tables Automatically and Insert Rows From File
 
-From version 2.1.5.0, tables can be automatically created using a super table as template when inserting data from a CSV file, Like below:
+From version 2.1.5.0, tables can be automatically created using a super table as template when inserting data from a CSV file, like below:
 
 ```sql
-INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) FILE '/tmp/csvfile.csv';
+INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile.csv';
 ```
 
-Multiple tables can be automatically created and inserted in single SQL statement, like below:也
+Multiple tables can be automatically created, and data inserted into them, in a single SQL statement, like below:
 
 ```sql
-INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) FILE '/tmp/csvfile_21001.csv'
+INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile_21001.csv'
             d21002 USING meters (groupId) TAGS (2) FILE '/tmp/csvfile_21002.csv';
 ```
 
 ## More About Insert
 
-For SQL statement like `insert`, stream parsing strategy is applied. That means before an error is found and the execution is aborted, the part prior to the error point has already been executed. Below is an experiment to help understand the behavior.
+For SQL statements like `insert`, a stream parsing strategy is applied. That means before an error is found and the execution is aborted, the part prior to the error point has already been executed. Below is an experiment to help understand the behavior.
 
-Firstly, a super table is created.
+First, a super table is created.
 
 ```sql
 CREATE TABLE meters(ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS(location BINARY(30), groupId INT);
 ```
 
-It can be proved that the super table has been created by `SHOW STableS`, but no table exists by `SHOW TABLES`.
+`SHOW STableS` proves that the super table has been created, while `SHOW TABLES` shows that no table exists yet.
 
 ```
 taos> SHOW STableS;
@@ -146,7 +146,7 @@ Query OK, 0 row(s) in set (0.000946s)
 
 Then, try to create table d1001 automatically when inserting data into it.
 
 ```sql
-INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2) VALUES('a');
+INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('a');
 ```
 
 The output shows the value to be inserted is invalid. But `SHOW TABLES` proves that the table has been created automatically by the `INSERT` statement.
@@ -161,4 +161,4 @@ taos> SHOW TABLES;
 Query OK, 1 row(s) in set (0.001091s)
 ```
 
-From the above experiment, we can see that even though the value to be inserted is invalid but the table is still created.
+From the above experiment, we can see that while the value to be inserted is invalid, the table is still created.
 
diff --git a/docs-en/12-taos-sql/06-select.md b/docs-en/12-taos-sql/06-select.md
index 22c2ee5e288a8fd388dbb6fba200caae69daacd1..8a017cf92e40aa4a854dcd531b7df291a9243515 100644
--- a/docs-en/12-taos-sql/06-select.md
+++ b/docs-en/12-taos-sql/06-select.md
@@ -21,7 +21,7 @@ SELECT select_expr [, select_expr ...]
 
 ## Wildcard
 
-Wilcard \* can be used to specify all columns.
The result includes only data columns for normal tables.
+Wildcard \* can be used to specify all columns. The result includes only data columns for normal tables.
 
 ```
 taos> SELECT * FROM d1001;
@@ -39,26 +39,26 @@ The result includes both data columns and tag columns for super table.
 taos> SELECT * FROM meters;
         ts            |   current   |  voltage  |    phase    |         location         | groupid |
 =====================================================================================================================================
- 2018-10-03 14:38:05.500 | 11.80000 | 221 | 0.28000 | Beijing.Haidian | 2 |
- 2018-10-03 14:38:16.600 | 13.40000 | 223 | 0.29000 | Beijing.Haidian | 2 |
- 2018-10-03 14:38:05.000 | 10.80000 | 223 | 0.29000 | Beijing.Haidian | 3 |
- 2018-10-03 14:38:06.500 | 11.50000 | 221 | 0.35000 | Beijing.Haidian | 3 |
- 2018-10-03 14:38:04.000 | 10.20000 | 220 | 0.23000 | Beijing.Chaoyang | 3 |
- 2018-10-03 14:38:16.650 | 10.30000 | 218 | 0.25000 | Beijing.Chaoyang | 3 |
- 2018-10-03 14:38:05.000 | 10.30000 | 219 | 0.31000 | Beijing.Chaoyang | 2 |
- 2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 | Beijing.Chaoyang | 2 |
- 2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 | Beijing.Chaoyang | 2 |
+ 2018-10-03 14:38:05.500 | 11.80000 | 221 | 0.28000 | California.LosAngeles | 2 |
+ 2018-10-03 14:38:16.600 | 13.40000 | 223 | 0.29000 | California.LosAngeles | 2 |
+ 2018-10-03 14:38:05.000 | 10.80000 | 223 | 0.29000 | California.LosAngeles | 3 |
+ 2018-10-03 14:38:06.500 | 11.50000 | 221 | 0.35000 | California.LosAngeles | 3 |
+ 2018-10-03 14:38:04.000 | 10.20000 | 220 | 0.23000 | California.SanFrancisco | 3 |
+ 2018-10-03 14:38:16.650 | 10.30000 | 218 | 0.25000 | California.SanFrancisco | 3 |
+ 2018-10-03 14:38:05.000 | 10.30000 | 219 | 0.31000 | California.SanFrancisco | 2 |
+ 2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 | California.SanFrancisco | 2 |
+ 2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 | California.SanFrancisco | 2 |
 Query OK, 9 row(s) in set (0.002022s)
 ```
 
-Wildcard can be used with table name as prefix, both below SQL statements have same effects and return all columns.
+Wildcard can be used with table name as prefix. Both SQL statements below have the same effect and return all columns.
 
 ```SQL
 SELECT * FROM d1001;
 SELECT d1001.* FROM d1001;
 ```
 
-In JOIN query, however, with or without table name prefix will return different results. \* without table prefix will return all the columns of both tables, but \* with table name as prefix will return only the columns of that table.
+In a JOIN query, however, the results are different with or without a table name prefix. \* without table prefix will return all the columns of both tables, but \* with table name as prefix will return only the columns of that table.
 
 ```
 taos> SELECT * FROM d1001, d1003 WHERE d1001.ts=d1003.ts;
@@ -76,7 +76,7 @@ taos> SELECT d1001.* FROM d1001,d1003 WHERE d1001.ts = d1003.ts;
 Query OK, 1 row(s) in set (0.020443s)
 ```
 
-Wilcard \* can be used with some functions, but the result may be different depending on the function being used. For example, `count(*)` returns only one column, i.e. the number of rows; `first`, `last` and `last_row` return all columns of the selected row.
+Wildcard \* can be used with some functions, but the result may be different depending on the function being used. For example, `count(*)` returns only one column, i.e. the number of rows; `first`, `last` and `last_row` return all columns of the selected row.
```
taos> SELECT COUNT(*) FROM d1001;
@@ -96,20 +96,20 @@ Query OK, 1 row(s) in set (0.000849s)
 
 ## Tags
 
-Starting from version 2.0.14, tag columns can be selected together with data columns when querying sub tables. Please be noted that, however, wildcard \* doesn't represent any tag column, that means tag columns must be specified explicitly like below example.
+Starting from version 2.0.14, tag columns can be selected together with data columns when querying subtables. Please note, however, that wildcard \* cannot be used to represent any tag column. This means that tag columns must be specified explicitly like the example below.
 
 ```
 taos> SELECT location, groupid, current FROM d1001 LIMIT 2;
              location             | groupid |   current   |
 ======================================================================
- Beijing.Chaoyang | 2 | 10.30000 |
- Beijing.Chaoyang | 2 | 12.60000 |
+ California.SanFrancisco | 2 | 10.30000 |
+ California.SanFrancisco | 2 | 12.60000 |
 Query OK, 2 row(s) in set (0.003112s)
 ```
 
 ## Get distinct values
 
-`DISTINCT` keyword can be used to get all the unique values of tag columns from a super table, it can also be used to get all the unique values of data columns from a table or sub table.
+`DISTINCT` keyword can be used to get all the unique values of tag columns from a super table. It can also be used to get all the unique values of data columns from a table or subtable.
 
 ```sql
 SELECT DISTINCT tag_name [, tag_name ...] FROM stb_name;
 SELECT DISTINCT col_name [, col_name ...] FROM tb_name;
@@ -118,15 +118,15 @@ SELECT DISTINCT col_name [, col_name ...] FROM tb_name;
 
 :::info
 
-1. Configuration parameter `maxNumOfDistinctRes` in `taos.cfg` is used to control the number of rows to output. The minimum configurable value is 100,000, the maximum configurable value is 100,000,000, the default value is 1000,000. If the actual number of rows exceeds the value of this parameter, only the number of rows specified by this parameter will be output.
-2. It can't be guaranteed that the results selected by using `DISTINCT` on columns of `FLOAT` or `DOUBLE` are exactly unique because of the precision nature of floating numbers.
-3. `DISTINCT` can't be used in the sub-query of a nested query statement, and can't be used together with aggregate functions, `GROUP BY` or `JOIN` in same SQL statement.
+1. Configuration parameter `maxNumOfDistinctRes` in `taos.cfg` is used to control the number of rows to output. The minimum configurable value is 100,000, the maximum configurable value is 100,000,000, the default value is 1,000,000. If the actual number of rows exceeds the value of this parameter, only the number of rows specified by this parameter will be output.
+2. It can't be guaranteed that the results selected by using `DISTINCT` on columns of `FLOAT` or `DOUBLE` are exactly unique because of the precision errors in floating point numbers.
+3. `DISTINCT` can't be used in the sub-query of a nested query statement, and can't be used together with aggregate functions, `GROUP BY` or `JOIN` in the same SQL statement.
 
 :::
 
 ## Columns Names of Result Set
 
-When using `SELECT`, the column names in the result set will be same as that in the select clause if `AS` is not used. `AS` can be used to rename the column names in the result set.
+When using `SELECT`, the column names in the result set will be the same as those in the select clause if `AS` is not used. `AS` can be used to rename the column names in the result set.
For example ``` taos> SELECT ts, ts AS primary_key_ts FROM d1001; @@ -161,7 +161,7 @@ SELECT * FROM d1001; ## Special Query -Some special query functionalities can be performed without `FORM` sub-clause. For example, below statement can be used to get the current database in use. +Some special query functions can be invoked without a `FROM` sub-clause. For example, the statement below can be used to get the current database in use. ``` taos> SELECT DATABASE(); @@ -181,7 +181,7 @@ taos> SELECT DATABASE(); Query OK, 1 row(s) in set (0.000184s) ``` -Below statement can be used to get the version of client or server. +The statement below can be used to get the version of the client or server. ``` taos> SELECT CLIENT_VERSION(); @@ -197,7 +197,7 @@ taos> SELECT SERVER_VERSION(); Query OK, 1 row(s) in set (0.000077s) ``` -Below statement is used to check the server status. One integer, like `1`, is returned if the server status is OK, otherwise an error code is returned. This way is compatible with the status check for TDengine from connection pool or 3rd party tools, and can avoid the problem of losing connection from connection pool when using wrong heartbeat checking SQL statement. +The statement below is used to check the server status. An integer, like `1`, is returned if the server status is OK, otherwise an error code is returned. This is compatible with the status check for TDengine from connection pool or 3rd party tools, and can avoid the problem of losing the connection from a connection pool when using the wrong heartbeat checking SQL statement. ``` taos> SELECT SERVER_STATUS(); @@ -248,12 +248,12 @@ summary: ## Special Keywords in TAOS SQL -- `TBNAME`: it is treated as a special tag when selecting on a super table, representing the name of sub-tables in that super table. +- `TBNAME`: it is treated as a special tag when selecting on a super table, representing the name of subtables in that super table. - `_c0`: represents the first column of a table or super table. ## Tips -To get all the sub tables and corresponding tag values from a super table: +To get all the subtables and corresponding tag values from a super table: ```SQL SELECT TBNAME, location FROM meters; @@ -271,10 +271,10 @@ Only filter on `TAGS` are allowed in the `where` clause for above two query stat taos> SELECT TBNAME, location FROM meters; tbname | location | ================================================================== - d1004 | Beijing.Haidian | - d1003 | Beijing.Haidian | - d1002 | Beijing.Chaoyang | - d1001 | Beijing.Chaoyang | + d1004 | California.LosAngeles | + d1003 | California.LosAngeles | + d1002 | California.SanFrancisco | + d1001 | California.SanFrancisco | Query OK, 4 row(s) in set (0.000881s) taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2; @@ -284,11 +284,11 @@ taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2; Query OK, 1 row(s) in set (0.001091s) ``` -- Wildcard \* can be used to get all columns, or specific column names can be specified. Arithmetic operation can be performed on columns of number types, columns can be renamed in the result set. -- Arithmetic operation on columns can't be used in where clause. For example, `where a*2>6;` is not allowed but `where a>6/2;` can be used instead for same purpose. +- Wildcard \* can be used to get all columns, or specific column names can be specified. Arithmetic operations can be performed on columns of numerical types, and columns can be renamed in the result set. +- Arithmetic operations on columns can't be used in the where clause.
For example, `where a*2>6;` is not allowed but `where a>6/2;` can be used instead for the same purpose. - Arithmetic operation on columns can't be used as the objectives of select statement. For example, `select min(2*a) from t;` is not allowed but `select 2*min(a) from t;` can be used instead. - Logical operation can be used in `WHERE` clause to filter numeric values, wildcard can be used to filter string values. -- Result set are arranged in ascending order of the first column, i.e. timestamp, but it can be controlled to output as descending order of timestamp. If `order by` is used on other columns, the result may be not as expected. By the way, \_c0 is used to represent the first column, i.e. timestamp. +- Result sets are arranged in ascending order of the first column, i.e. timestamp, but the output can be ordered in descending order of timestamp. If `order by` is used on other columns, the result may not be as expected. Note that \_c0 is used to represent the first column, i.e. timestamp. - `LIMIT` parameter is used to control the number of rows to output. `OFFSET` parameter is used to specify from which row to output. `LIMIT` and `OFFSET` are executed after `ORDER BY` in the query execution. A simple tip is that `LIMIT 5 OFFSET 2` can be abbreviated as `LIMIT 2, 5`. - What is controlled by `LIMIT` is the number of rows in each group when `GROUP BY` is used. - `SLIMIT` parameter is used to control the number of groups when `GROUP BY` is used. Similar to `LIMIT`, `SLIMIT 5 OFFSET 2` can be abbreviated as `SLIMIT 2, 5`. @@ -296,7 +296,7 @@ Query OK, 1 row(s) in set (0.001091s) ## Where -Logical operations in below table can be used in `where` clause to filter the resulting rows. +Logical operations in the table below can be used in the `where` clause to filter the resulting rows. | **Operation** | **Note** | **Applicable Data Types** | | ------------- | ------------------------ | ----------------------------------------- | @@ -312,19 +312,19 @@ Logical operations in below table can be used in the `where` clause to filter the re | like | match a wildcard string | **`binary`** **`nchar`** | | match/nmatch | filter regex | **`binary`** **`nchar`** | -**使用说明**: +**Explanations**: -- Operator `<\>` is equal to `!=`, please be noted that this operator can't be used on the first column of any table, i.e.timestamp column. +- Operator `<\>` is equal to `!=`, please note that this operator can't be used on the first column of any table, i.e. the timestamp column. - Operator `like` is used together with wildcards to match strings - '%' matches 0 or any number of characters, '\_' matches any single ASCII character. - `\_` is used to match the \_ in the string. - - The maximum length of wildcard string is 100 bytes from version 2.1.6.1 (before that the maximum length is 20 bytes). `maxWildCardsLength` in `taos.cfg` can be used to control this threshold. Too long wildcard string may slowdown the execution performance of `LIKE` operator. + - The maximum length of wildcard string is 100 bytes from version 2.1.6.1 (before that the maximum length is 20 bytes). `maxWildCardsLength` in `taos.cfg` can be used to control this threshold. A very long wildcard string may slow down the execution performance of the `LIKE` operator. - `AND` keyword can be used to filter multiple columns simultaneously. AND/OR operation can be performed on single or multiple columns from version 2.3.0.0. However, before 2.3.0.0 `OR` can't be used on multiple columns.
- For timestamp column, only one condition can be used; for other columns or tags, `OR` keyword can be used to combine multiple logical operators. For example, `((value > 20 AND value < 30) OR (value < 12))`. - From version 2.3.0.0, multiple conditions can be used on timestamp column, but the result set can only contain a single time range. - From version 2.0.17.0, operator `BETWEEN AND` can be used in where clause, for example `WHERE col2 BETWEEN 1.5 AND 3.25` means the filter condition is equal to "1.5 ≤ col2 ≤ 3.25". -- From version 2.1.4.0, operator `IN` can be used in where clause. For example, `WHERE city IN ('Beijing', 'Shanghai')`. For bool type, both `{true, false}` and `{0, 1}` are allowed, but integers other than 0 or 1 are not allowed. FLOAT and DOUBLE types are impacted by floating precision, only values that match the condition within the tolerance will be selected. Non-primary key column of timestamp type can be used with `IN`. -- From version 2.3.0.0, regular expression is supported in where clause with keyword `match` or `nmatch`, the regular expression is case insensitive. +- From version 2.1.4.0, operator `IN` can be used in the where clause. For example, `WHERE city IN ('California.SanFrancisco', 'California.SanDiego')`. For bool type, both `{true, false}` and `{0, 1}` are allowed, but integers other than 0 or 1 are not allowed. FLOAT and DOUBLE types are impacted by floating point precision errors. Only values that match the condition within the tolerance will be selected. Non-primary key column of timestamp type can be used with `IN`. +- From version 2.3.0.0, regular expression is supported in the where clause with keyword `match` or `nmatch`. The regular expression is case insensitive. ## Regular Expression @@ -342,11 +342,11 @@ The regular expression being used must be compliant with POSIX specification, pl Regular expression can be used against only table names, i.e. `tbname`, and tags of binary/nchar types, but can't be used against data columns. -The maximum length of regular expression string is 128 bytes. Configuration parameter `maxRegexStringLen` can be used to set the maximum allowed regular expression. It's a configuration parameter on client side, and will take in effect after restarting the client. +The maximum length of regular expression string is 128 bytes. Configuration parameter `maxRegexStringLen` can be used to set the maximum allowed length of a regular expression. It's a configuration parameter on the client side, and will take effect after restarting the client. ## JOIN -From version 2.2.0.0, inner join is fully supported in TDengine. More specifically, the inner join between table and table, that between STable and STable, and that between sub query and sub query are supported. +From version 2.2.0.0, inner join is fully supported in TDengine. More specifically, inner joins between tables, between STables, and between sub queries are supported. Only primary key, i.e. timestamp, can be used in the join operation between table and table. For example: @@ -364,12 +364,12 @@ FROM temp_STable t1, temp_STable t2 WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0; ``` -Similary, join operation can be performed on the result set of multiple sub queries. +Similarly, join operations can be performed on the result set of multiple sub queries. :::note Restrictions on join operation: -- The number of tables or STables in single join operation can't exceed 10.
+- The number of tables or STables in a single join operation can't exceed 10. - `FILL` is not allowed in the query statement that includes JOIN operation. - Arithmetic operation is not allowed on the result set of join operation. - `GROUP BY` is not allowed on only a subset of the tables that participate in a join operation. @@ -380,9 +380,9 @@ Restrictions on join operation: ## Nested Query -Nested query is also called sub query, that means in a single SQL statement the result of inner query can be used as the data source of the outer query. +Nested query is also called sub query. This means that in a single SQL statement the result of the inner query can be used as the data source of the outer query. -From 2.2.0.0, unassociated sub query can be used in the `FROM` clause. unassociated means the sub query doesn't use the parameters in the parent query. More specifically, in the `tb_name_list` of `SELECT` statement, an independent SELECT statement can be used. So a complete nested query looks like: +From 2.2.0.0, unassociated sub query can be used in the `FROM` clause. Unassociated means the sub query doesn't use the parameters in the parent query. More specifically, in the `tb_name_list` of `SELECT` statement, an independent SELECT statement can be used. So a complete nested query looks like: ```SQL SELECT ... FROM (SELECT ... FROM ...) ...; @@ -390,14 +390,14 @@ SELECT ... FROM (SELECT ... FROM ...) ...; :::info -- Only one layer of nesting is allowed, that means no sub query is allowed in a sub query -- The result set returned by the inner query will be used as a "virtual table" by the outer query, the "virtual table" can be renamed using `AS` keyword for easy reference in the outer query. +- Only one layer of nesting is allowed, which means no sub query is allowed within a sub query +- The result set returned by the inner query will be used as a "virtual table" by the outer query. The "virtual table" can be renamed using `AS` keyword for easy reference in the outer query. - Sub query is not allowed in continuous query. - JOIN operation is allowed between tables/STables inside both inner and outer queries. Join operation can be performed on the result set of the inner query. - UNION operation is not allowed in either inner query or outer query. -- The functionalities that can be used in the inner query is same as non-nested query. - - `ORDER BY` inside the inner query doesn't make any sense but will slow down the query performance significantly, so please avoid such usage. -- Compared to the non-nested query, the functionalities that can be used in the outer query have such restrictions as: +- The functions that can be used in the inner query are the same as those that can be used in a non-nested query. - `ORDER BY` inside the inner query is unnecessary and will slow down the query performance significantly. It is best to avoid the use of `ORDER BY` inside the inner query. +- Compared to the non-nested query, the functionality that can be used in the outer query has the following restrictions: - Functions - If the result set returned by the inner query doesn't contain timestamp column, then functions relying on timestamp can't be used in the outer query, like `TOP`, `BOTTOM`, `FIRST`, `LAST`, `DIFF`. - Functions that need to scan the data twice can't be used in the outer query, like `STDDEV`, `PERCENTILE`. @@ -414,7 +414,7 @@ UNION ALL SELECT ... [UNION ALL SELECT ...]
``` -`UNION ALL` operator can be used to combine the result set from multiple select statements as long as the result set of these select statements have exactly same columns. `UNION ALL` doesn't remove redundant rows from multiple result sets. In single SQL statement, at most 100 `UNION ALL` can be supported. +`UNION ALL` operator can be used to combine the result sets from multiple select statements as long as the result sets of these select statements have exactly the same columns. `UNION ALL` doesn't remove redundant rows from multiple result sets. In a single SQL statement, at most 100 `UNION ALL` operators are supported. ### Examples @@ -442,8 +442,8 @@ The sum of col1 and col2 for rows later than 2018-06-01 08:00:00.000 and whose c SELECT (col1 + col2) AS 'complex' FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' AND col2 > 1.2 LIMIT 10 OFFSET 5; ``` -The rows in the past 10 minutes and whose col2 is bigger than 3.14 are selected and output to the result file `/home/testoutpu.csv` with below SQL statement: +The rows from the past 10 minutes whose col2 is bigger than 3.14 are selected and output to the result file `/home/testoutput.csv` with the SQL statement below: ```SQL -SELECT COUNT(*) FROM tb1 WHERE ts >= NOW - 10m AND col2 > 3.14 >> /home/testoutpu.csv; +SELECT COUNT(*) FROM tb1 WHERE ts >= NOW - 10m AND col2 > 3.14 >> /home/testoutput.csv; ``` diff --git a/docs-en/12-taos-sql/07-function.md b/docs-en/12-taos-sql/07-function.md index 44389f35a2a8ab50a0a5a8ed49286cf55240d597..3589efe9cdd618110203a8439ba03eaaf315f48c 100644 --- a/docs-en/12-taos-sql/07-function.md +++ b/docs-en/12-taos-sql/07-function.md @@ -4,7 +4,7 @@ title: Functions ## Aggregate Functions -Aggregate query is supported in TDengine by following aggregate functions and selection functions. +Aggregate queries are supported in TDengine by the following aggregate functions and selection functions. ### COUNT @@ -12,18 +12,18 @@ Aggregate query is supported in TDengine by following aggregate functions and se SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause]; ``` -**Description**:Get the number of rows or the number of non-null values in a table or a super table. +**Description**: Get the number of rows or the number of non-null values in a table or a super table. -**Return value type**:Long integer INT64 +**Return value type**: Long integer INT64 -**Applicable column types**:All +**Applicable column types**: All **Applicable table types**: table, super table, sub table **More explanation**: -- Wildcard (\*) can be used to represent all columns, it's used to get the number of all rows -- The number of non-NULL values will be returned if this function is used on a specific column +- Wildcard (\*) is used to represent all columns. The `COUNT` function is used to get the total number of rows. +- The number of non-NULL values will be returned if this function is used on a specific column.
**Examples**: @@ -47,13 +47,13 @@ Query OK, 1 row(s) in set (0.001075s) SELECT AVG(field_name) FROM tb_name [WHERE clause]; ``` -**Description**:Get the average value of a column in a table or STable +**Description**: Get the average value of a column in a table or STable -**Return value type**:Double precision floating number +**Return value type**: Double precision floating number -**Applicable column types**:Data types except for timestamp, binary, nchar and bool +**Applicable column types**: Data types except for timestamp, binary, nchar and bool -**Applicable table types**:table, STable +**Applicable table types**: table, STable **Examples**: @@ -77,17 +77,17 @@ Query OK, 1 row(s) in set (0.000943s) SELECT TWA(field_name) FROM tb_name WHERE clause; ``` -**Description**:Time weighted average on a specific column within a time range +**Description**: Time weighted average on a specific column within a time range -**Return value type**:Double precision floating number +**Return value type**: Double precision floating number -**Applicable column types**:Data types except for timestamp, binary, nchar and bool +**Applicable column types**: Data types except for timestamp, binary, nchar and bool -**Applicable table types**:table, STable +**Applicable table types**: table, STable **More explanations**: -- From version 2.1.3.0, function TWA can be used on stble with `GROUP BY`, i.e. timelines generated by `GROUP BY tbname` on a STable. +- Since version 2.1.3.0, function TWA can be used on a STable with `GROUP BY`, i.e. timelines generated by `GROUP BY tbname` on a STable. ### IRATE @@ -95,17 +95,17 @@ SELECT TWA(field_name) FROM tb_name WHERE clause; SELECT IRATE(field_name) FROM tb_name WHERE clause; ``` -**Description**:instantaneous rate on a specific column. The last two samples in the specified time range are used to calculate instantaneous rate. If the last sample value is smaller, then only the last sample value is used instead of the difference between the last two sample values. +**Description**: Instantaneous rate on a specific column. The last two samples in the specified time range are used to calculate instantaneous rate. If the last sample value is smaller, then only the last sample value is used instead of the difference between the last two sample values. -**Return value type**:Double precision floating number +**Return value type**: Double precision floating number -**Applicable column types**:Data types except for timestamp, binary, nchar and bool +**Applicable column types**: Data types except for timestamp, binary, nchar and bool -**Applicable table types**:table, STable +**Applicable table types**: table, STable **More explanations**: -- From version 2.1.3.0, function IRATE can be used on stble with `GROUP BY`, i.e. timelines generated by `GROUP BY tbname` on a STable. +- Since version 2.1.3.0, function IRATE can be used on a STable with `GROUP BY`, i.e. timelines generated by `GROUP BY tbname` on a STable.
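As a hedged illustration of the two functions above, the sketch below runs them against the demo `meters` super table used throughout this chapter; the time range is taken from the sample data shown earlier, and `GROUP BY tbname` is the clause both functions require on a STable:

```sql
-- Illustrative sketch only: time-weighted average and instantaneous rate
-- per sub-table of the demo "meters" STable. Both functions require
-- GROUP BY tbname when used on a STable (since version 2.1.3.0).
SELECT TWA(current) FROM meters
  WHERE ts >= '2018-10-03 14:38:04.000' AND ts <= '2018-10-03 14:38:16.800'
  GROUP BY tbname;

SELECT IRATE(current) FROM meters
  WHERE ts >= '2018-10-03 14:38:04.000' AND ts <= '2018-10-03 14:38:16.800'
  GROUP BY tbname;
```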
### SUM @@ -113,13 +113,13 @@ SELECT IRATE(field_name) FROM tb_name WHERE clause; SELECT SUM(field_name) FROM tb_name [WHERE clause]; ``` -**Description**:The sum of a specific column in a table or STable +**Description**: The sum of a specific column in a table or STable -**Return value type**:Double precision floating number or long integer +**Return value type**: Double precision floating number or long integer -**Applicable column types**:Data types except for timestamp, binary, nchar and bool +**Applicable column types**: Data types except for timestamp, binary, nchar and bool -**Applicable table types**:table, STable +**Applicable table types**: table, STable **Examples**: @@ -143,13 +143,13 @@ Query OK, 1 row(s) in set (0.000980s) SELECT STDDEV(field_name) FROM tb_name [WHERE clause]; ``` -**Description**:Standard deviation of a specific column in a table or STable +**Description**: Standard deviation of a specific column in a table or STable -**Return value type**:Double precision floating number +**Return value type**: Double precision floating number -**Applicable column types**:Data types except for timestamp, binary, nchar and bool +**Applicable column types**: Data types except for timestamp, binary, nchar and bool -**Applicable table types**:table, STable (starting from version 2.0.15.1) +**Applicable table types**: table, STable (since version 2.0.15.1) **Examples**: @@ -167,13 +167,13 @@ Query OK, 1 row(s) in set (0.000915s) SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause]; ``` -**Description**:统计表中某列的值是主键(时间戳)的拟合直线方程.start_val 是自变量初始值,step_val 是自变量的步长值. +**Description**: The linear regression function of the specified column and the timestamp column (primary key); `start_val` is the initial value of the independent variable and `step_val` is its step value. -**Return value type**:A string in the format of "(slope, intercept)" +**Return value type**: A string in the format of "(slope, intercept)" -**Applicable column types**:Data types except for timestamp, binary, nchar and bool +**Applicable column types**: Data types except for timestamp, binary, nchar and bool -**Applicable table types**:table only +**Applicable table types**: table only **Examples**: @@ -193,13 +193,13 @@ SELECT MODE(field_name) FROM tb_name [WHERE clause]; **Description**: The value which has the highest frequency of occurrence. NULL is returned if there are multiple values which have the highest frequency of occurrence. It can't be used on timestamp column or tags. -**Return value type**:Same as the data type of the column being operated +**Return value type**: Same as the data type of the column being operated upon **Applicable column types**: Data types except for timestamp **More explanations**: Since the number of rows returned is unpredictable, it's suggested to limit the number of unique values to 100,000; otherwise an error will be returned. -**Applicable version**:From version 2.6.0.0 +**Applicable version**: Since version 2.6.0.0 **Examples**: @@ -234,7 +234,7 @@ SELECT HYPERLOGLOG(field_name) FROM { tb_name | stb_name } [WHERE clause]; **More explanations**: The benefit of using hyperloglog algorithm is that the memory usage is under control when the data volume is huge. However, when the data volume is very small, the result may not be accurate; it's recommended to use `select count(data) from (select unique(col) as data from table)` in this case.
-**Applicable versions**:From version 2.6.0.0 +**Applicable versions**: Since version 2.6.0.0 **Examples**: @@ -259,9 +259,103 @@ taos> select hyperloglog(dbig) from shll; Query OK, 1 row(s) in set (0.008388s) ``` +### HISTOGRAM + +``` +SELECT HISTOGRAM(field_name, bin_type, bin_description, normalized) FROM tb_name [WHERE clause]; +``` + +**Description**: Returns the count of data points in user-specified ranges. + +**Return value type**: Double or INT64, depending on the normalized parameter setting. + +**Applicable column type**: Numerical types. + +**Applicable versions**: Since version 2.6.0.0. + +**Applicable table types**: table, STable + +**Explanations**: + +1. bin_type: parameter to indicate the bucket type, valid inputs are: "user_input", "linear_bin", "log_bin". +2. bin_description: parameter to describe how to generate buckets, can be in the following JSON formats for each bin_type respectively: + + - "user_input": "[1, 3, 5, 7]": User specified bin values. + + - "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}" + "start" - bin starting point. + "width" - bin offset. + "count" - number of bins generated. + "infinity" - whether to add (-inf, inf) as start/end points in the generated set of bins. + The above "linear_bin" descriptor generates a set of bins: [-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf]. + + - "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}" + "start" - bin starting point. + "factor" - exponential factor of bin offset. + "count" - number of bins generated. + "infinity" - whether to add (-inf, inf) as start/end points in the generated range of bins. + The above "log_bin" descriptor generates a set of bins: [-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf]. + +3. normalized: set to 1/0 to turn on/off result normalization. + +**Example**: + +```mysql +taos> SELECT HISTOGRAM(voltage, "user_input", "[1,3,5,7]", 1) FROM meters; + histogram(voltage, "user_input", "[1,3,5,7]", 1) | + ======================================================= + {"lower_bin":1, "upper_bin":3, "count":0.333333} | + {"lower_bin":3, "upper_bin":5, "count":0.333333} | + {"lower_bin":5, "upper_bin":7, "count":0.333333} | + Query OK, 3 row(s) in set (0.004273s) + +taos> SELECT HISTOGRAM(voltage, 'linear_bin', '{"start": 1, "width": 3, "count": 3, "infinity": false}', 0) FROM meters; + histogram(voltage, 'linear_bin', '{"start": 1, "width": 3, " | + =================================================================== + {"lower_bin":1, "upper_bin":4, "count":3} | + {"lower_bin":4, "upper_bin":7, "count":3} | + {"lower_bin":7, "upper_bin":10, "count":3} | + Query OK, 3 row(s) in set (0.004887s) + +taos> SELECT HISTOGRAM(voltage, 'log_bin', '{"start": 1, "factor": 3, "count": 3, "infinity": true}', 0) FROM meters; + histogram(voltage, 'log_bin', '{"start": 1, "factor": 3, "count" | + =================================================================== + {"lower_bin":-inf, "upper_bin":1, "count":3} | + {"lower_bin":1, "upper_bin":3, "count":2} | + {"lower_bin":3, "upper_bin":9, "count":6} | + {"lower_bin":9, "upper_bin":27, "count":3} | + {"lower_bin":27, "upper_bin":inf, "count":1} | +``` + +### ELAPSED + +```mysql +SELECT ELAPSED(field_name[, time_unit]) FROM { tb_name | stb_name } [WHERE clause] [INTERVAL(interval [, offset]) [SLIDING sliding]]; +``` + +**Description**: The `elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with `INTERVAL` clause, the returned result is the calculated time length within each time window.
If it's used without `INTERVAL` clause, the returned result is the calculated time length within the specified time range. Please note that the return value of `elapsed` is the number of `time_unit` in the calculated time length. + +**Return value type**: Double + +**Applicable column type**: Timestamp + +**Applicable versions**: Since version 2.6.0.0 + +**Applicable tables**: table, STable, outer query in nested query + +**Explanations**: +- `field_name` parameter can only be the first column of a table, i.e. timestamp primary key. +- The minimum value of `time_unit` is the time precision of the database. If `time_unit` is not specified, the time precision of the database is used as the default time unit. +- It can be used with `INTERVAL` to get the valid time length of each time window. Please note that the return value is the same as the time window length for all time windows except the first and the last one. +- `order by asc/desc` has no effect on the result. +- `group by tbname` must be used when `elapsed` is used against a STable. +- `group by` must NOT be used when `elapsed` is used against a table or sub table. +- When used in nested query, it's only applicable when the inner query outputs an implicit timestamp column as the primary key. For example, `select elapsed(ts) from (select diff(value) from sub1)` is legal usage while `select elapsed(ts) from (select * from sub1)` is not. +- It can't be used with `leastsquares`, `diff`, `derivative`, `top`, `bottom`, `last_row`, `interp`. + ## Selection Functions -When any selective function is used, timestamp column or tag columns including `tbname` can be specified to show that the selected value are from which rows. +When any select function is used, the timestamp column or tag columns including `tbname` can be specified to show which rows the selected values come from.
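For instance, the hedged sketch below (it assumes the demo `meters` schema with the `current` column and `location` tag shown earlier in this document) returns the timestamp, sub-table name and tag value of the row the selected value comes from:

```sql
-- Illustrative sketch only: alongside the selected maximum value,
-- also return the timestamp, sub-table name and a tag of that row.
SELECT MAX(current), ts, tbname, location FROM meters;
```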
### MIN @@ -269,13 +363,13 @@ When any selective function is used, timestamp column or tag columns including ` SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause]; ``` -**Description**:The minimum value of a specific column in a table or STable +**Description**: The minimum value of a specific column in a table or STable -**Return value type**:Same as the data type of the column being operated +**Return value type**: Same as the data type of the column being operated upon -**Applicable column types**:Data types except for timestamp, binary, nchar and bool +**Applicable column types**: Data types except for timestamp, binary, nchar and bool -**Applicable table types**:table, STable +**Applicable table types**: table, STable **Examples**: @@ -299,13 +393,13 @@ Query OK, 1 row(s) in set (0.000950s) SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**:The maximum value of a specific column of a table or STable +**Description**: The maximum value of a specific column of a table or STable -**Return value type**:Same as the data type of the column being operated +**Return value type**: Same as the data type of the column being operated upon -**Applicable column types**:Data types except for timestamp, binary, nchar and bool +**Applicable column types**: Data types except for timestamp, binary, nchar and bool -**Applicable table types**:table, STable +**Applicable table types**: table, STable **Examples**: @@ -329,19 +423,19 @@ Query OK, 1 row(s) in set (0.000987s) SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**:The first non-null value of a specific column in a table or STable +**Description**: The first non-null value of a specific column in a table or STable -**Return value type**:Same as the column being operated +**Return value type**: Same as the column being operated upon -**Applicable column types**:Any data type +**Applicable column types**: Any data type -**Applicable table types**:table, STable +**Applicable table types**: table, STable **More explanations**: - FIRST(\*) can be used to get the first non-null value of all columns - NULL will be returned if the values of the specified column are all NULL -- No result will NOT be returned if all the columns in the result set are all NULL +- No result will be returned if all the columns in the result set are NULL **Examples**: @@ -365,13 +459,13 @@ Query OK, 1 row(s) in set (0.001023s) SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**:The last non-NULL value of a specific column in a table or STable +**Description**: The last non-NULL value of a specific column in a table or STable -**Return value type**:Same as the column being operated +**Return value type**: Same as the column being operated upon -**Applicable column types**:Any data type +**Applicable column types**: Any data type -**Applicable table types**:table, STable +**Applicable table types**: table, STable **More explanations**: @@ -403,11 +497,11 @@ SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause]; **Description**: The greatest _k_ values of a specific column in a table or STable. If a value has multiple occurrences in the column and counting all of them would exceed the upper limit _k_, then a subset of them is returned randomly.
-**Return value type**:Same as the column being operated +**Return value type**: Same as the column being operated upon -**Applicable column types**:Data types except for timestamp, binary, nchar and bool +**Applicable column types**: Data types except for timestamp, binary, nchar and bool -**Applicable table types**:table, STable +**Applicable table types**: table, STable **More explanations**: @@ -440,9 +534,9 @@ Query OK, 2 row(s) in set (0.000810s) SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**:The least _k_ values of a specific column in a table or STable. If a value has multiple occurrences in the column but counting all of them in will exceed the upper limit _k_, then a part of them will be returned randomly. +**Description**: The least _k_ values of a specific column in a table or STable. If a value has multiple occurrences in the column and counting all of them would exceed the upper limit _k_, then a subset of them is returned randomly. -**Return value type**:Same as the column being operated +**Return value type**: Same as the column being operated upon **Applicable column types**: Data types except for timestamp, binary, nchar and bool @@ -549,7 +643,7 @@ SELECT LAST_ROW(field_name) FROM { tb_name | stb_name }; **Description**: The last row of a table or STable -**Return value type**: Same as the column being operated +**Return value type**: Same as the column being operated upon **Applicable column types**: Any data type @@ -576,7 +670,7 @@ SELECT LAST_ROW(field_name) FROM { tb_name | stb_name }; Query OK, 1 row(s) in set (0.001042s) ``` -### INTERP [From version 2.3.1] +### INTERP [Since version 2.3.1] ``` SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ RANGE(timestamp1,timestamp2) ] [EVERY(interval)] [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})]; @@ -584,7 +678,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ **Description**: The value that matches the specified timestamp range is returned, if existing; or an interpolation value is returned. -**Return value type**: same as the column being operated +**Return value type**: Same as the column being operated upon **Applicable column types**: Numeric data types @@ -593,7 +687,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ **More explanations** - `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists an interpolation value will be returned based on `FILL` parameter. -- The input data of `INTERP` is the value of the specified column, `where` can be used to filter the original data. If no `where` condition is specified then all original data is the input. +- The input data of `INTERP` is the value of the specified column and a `where` clause can be used to filter the original data. If no `where` condition is specified then all original data is the input. - The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1<=timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified. If `RANGE` is not specified, then the timestamp of the first row that matches the filter condition is treated as timestamp1, the timestamp of the last row that matches the filter condition is treated as timestamp2.
- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified by the `EVERY` parameter. If the `EVERY` parameter is not used, the time window is considered to have no ending timestamp, i.e. there is only one time window starting from timestamp1. - Interpolation is performed based on `FILL` parameter. No interpolation is performed if `FILL` is not used, which means either the original data that matches is returned or nothing is returned. @@ -608,7 +702,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ taos> SELECT INTERP(current) FROM t1 RANGE('2017-7-14 18:40:00','2017-7-14 18:40:00') FILL(LINEAR); ``` -- Get an original data every 5 seconds, no interpolation, between "2017-07-14 18:00:00" and "2017-07-14 19:00:00: +- Get original data every 5 seconds, no interpolation, between "2017-07-14 18:00:00" and "2017-07-14 19:00:00": ``` taos> SELECT INTERP(current) FROM t1 RANGE('2017-7-14 18:00:00','2017-7-14 19:00:00') EVERY(5s); @@ -632,7 +726,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ taos> SELECT INTERP(current) FROM t1 where ts >= '2017-07-14 17:00:00' and ts <= '2017-07-14 20:00:00' RANGE('2017-7-14 18:00:00','2017-7-14 19:00:00') EVERY(5s) FILL(LINEAR); ``` -### INTERP [Prior to version 2.3.1] +### INTERP [Since version 2.0.15.0] ``` SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})]; @@ -640,7 +734,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL **Description**: The value of a specific column that matches the specified time slice -**Return value type**: Same as the column being operated +**Return value type**: Same as the column being operated upon **Applicable column types**: Numeric data type @@ -648,7 +742,6 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL **More explanations**: -- It can be used from version 2.0.15.0 - Time slice must be specified. If there is no data matching the specified time slice, interpolation is performed based on `FILL` parameter. Conditions such as tags or `tbname` can be used in the `WHERE` clause to filter data. - The timestamp specified must be within the time range of the data rows of the table or STable. If it is beyond the valid time range, nothing is returned even with `FILL` parameter. - `INTERP` can be used to query only a single time point at a time. `INTERP` can be used with `EVERY` to get the interpolation value for every time interval. @@ -662,7 +755,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL Query OK, 1 row(s) in set (0.002652s) ``` -If there is not any data corresponding to the specified timestamp, an interpolation value is returned if interpolation policy is specified by `FILL` parameter; or nothing is returned\ +If there is no data corresponding to the specified timestamp, an interpolation value is returned if interpolation policy is specified by `FILL` parameter; or nothing is returned.
``` taos> SELECT INTERP(*) FROM meters WHERE tbname IN ('d636') AND ts='2017-7-14 18:40:00.005'; @@ -696,11 +789,11 @@ SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause]; **Parameter value range**: k: [1,100] offset_val: [0,100] -**Return value type**: Same as the column being operated +**Return value type**: Same as the column being operated upon **Applicable column types**: Any data type except for timestamp, i.e. the primary key -**Applicable versions**: From version 2.6.0.0 +**Applicable versions**: Since version 2.6.0.0 **Examples**: @@ -732,11 +825,11 @@ SELECT UNIQUE(field_name) FROM {tb_name | stb_name} [WHERE clause]; **Description**: The values that occur for the first time in the specified column. The effect is similar to `distinct` keyword, but it can also be used to match tags or timestamp. -**Return value type**: Same as the column or tag being operated +**Return value type**: Same as the column or tag being operated upon **Applicable column types**: Any data types except for timestamp -**支持版本**: From version 2.6.0.0 +**Applicable versions**: Since version 2.6.0.0 **More explanations**: @@ -780,7 +873,7 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER **Description**: The difference between each row and its previous row for a specific column. `ignore_negative` can be specified as 0 or 1, the default value is 1 if it's not specified. `1` means negative values are ignored. -**Return value type**: Same as the column being operated +**Return value type**: Same as the column being operated upon **Applicable column types**: Data types except for timestamp, binary, nchar and bool @@ -789,8 +882,8 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER **More explanations**: - The number of result rows is the number of rows minus one; there is no output for the first row -- From version 2.1.30, `DIFF` can be used on STable with `GROUP by tbname` -- From version 2.6.0, `ignore_negative` parameter is supported +- Since version 2.1.30, `DIFF` can be used on STable with `GROUP BY tbname` +- Since version 2.6.0, `ignore_negative` parameter is supported **Examples**: @@ -809,7 +902,7 @@ Query OK, 2 row(s) in set (0.001162s) SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHERE clause]; ``` -**Description**: The derivative of a specific column. The time rage can be specified by parameter `time_interval` 参数指定, the minimum allowed time range is 1 second (1s); the value of `ignore_negative` can be 0 or 1, 1 means negative values are ignored. +**Description**: The derivative of a specific column. The time range can be specified by parameter `time_interval`, the minimum allowed time range is 1 second (1s); the value of `ignore_negative` can be 0 or 1, 1 means negative values are ignored. **Return value type**: Double precision floating point @@ -819,7 +912,7 @@ SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHER **More explanations**: -- It is available from version 2.1.3.0, the number of result rows is the number of total rows in the time range subtracted by one, no output for the first row.\ +- It is available since version 2.1.3.0; the number of result rows is the total number of rows in the time range minus one, with no output for the first row. - It can be used together with `GROUP BY tbname` against a STable.
**Examples**: @@ -850,7 +943,7 @@ SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause]; **Applicable table types**: table, STable -**More explanations**: Can be used on a column of TIMESTAMP type, the result is the time range size.可 +**More explanations**: Can be used on a column of TIMESTAMP type, the result is the time range size. **Examples**: @@ -874,7 +967,7 @@ Query OK, 1 row(s) in set (0.000836s) SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The round up value of a specific column +**Description**: The rounded up value of a specific column **Return value type**: Same as the column being used @@ -882,7 +975,7 @@ SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause]; **Applicable table types**: table, STable -**Applicable nested query**: inner query and outer query +**Applicable nested query**: Inner query and outer query **More explanations**: @@ -896,9 +989,9 @@ SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause]; SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The round down value of a specific column +**Description**: The rounded down value of a specific column -**More explanations**: The restrictions are same as `CEIL` function. +**More explanations**: The restrictions are the same as those of the `CEIL` function. ### ROUND @@ -906,7 +999,7 @@ SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause]; SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` -**Description**: The round value of a specific column. +**Description**: The rounded value of a specific column. **More explanations**: The restrictions are the same as those of the `CEIL` function. @@ -933,7 +1026,7 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause]; - Can only be used with aggregate functions - `Group by tbname` must be used together on a STable to force the result on a single timeline -**Applicable versions**: From 2.3.0.x +**Applicable versions**: Since 2.3.0.x ### MAVG @@ -955,10 +1048,10 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause]; - Arithmetic operation can't be performed on the result of `MAVG`. - Can only be used with data columns, can't be used with tags. -- Can't be used with aggregate functions.\(Aggregation)函数一起使用; -- Must be used with `GROUP BY tbname` when it's used on a STable to force the result on each single timeline.该 +- Can't be used with aggregate functions. +- Must be used with `GROUP BY tbname` when it's used on a STable to force the result on each single timeline.
-**Applicable versions**: From 2.3.0.x +**Applicable versions**: Since 2.3.0.x ### SAMPLE @@ -981,7 +1074,7 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause]; - Arithmetic operation can't be operated on the result of `SAMPLE` function - Must be used with `Group by tbname` when it's used on a STable to force the result on each single timeline -**Applicable versions**: From 2.3.0.x +**Applicable versions**: Since 2.3.0.x ### ASIN @@ -1012,9 +1105,9 @@ SELECT ASIN(field_name) FROM { tb_name | stb_name } [WHERE clause] SELECT ACOS(field_name) FROM { tb_name | stb_name } [WHERE clause] ``` -**Description**: The anti-cosine of a specific column 获 +**Description**: The anti-cosine of a specific column -**Return value type**: ouble if the input value is not NULL; or NULL if the input value is NULL +**Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL **Applicable data types**: Data types except for timestamp, binary, nchar, bool @@ -1037,7 +1130,7 @@ SELECT ATAN(field_name) FROM { tb_name | stb_name } [WHERE clause] **Description**: The anti-tangent of a specific column -**Description**: The anti-cosine of a specific column 获 **Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL @@ -1062,7 +1155,7 @@ SELECT SIN(field_name) FROM { tb_name | stb_name } [WHERE clause] **Description**: The sine of a specific column -**Description**: The anti-cosine of a specific column 获 **Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL @@ -1087,7 +1180,7 @@ SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause] **Description**: The cosine of a specific column -**Description**: The anti-cosine of a specific column 获 **Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL @@ -1112,7 +1205,7 @@ SELECT TAN(field_name) FROM { tb_name | stb_name } [WHERE clause] **Description**: The tangent of a specific column -**Description**: The anti-cosine of a specific column 获 **Return value type**: Double if the input value is not NULL; or NULL if the input value is NULL @@ -1183,7 +1276,7 @@ SELECT ABS(field_name) FROM { tb_name | stb_name } [WHERE clause] **Description**: The absolute value of a specific column -**Return value type**: UBIGINT if the input value is integer; DOUBLE if the input value is FLOAT/DOUBLE 如 +**Return value type**: UBIGINT if the input value is integer; DOUBLE if the input value is FLOAT/DOUBLE **Applicable data types**: Data types except for timestamp, binary, nchar, bool @@ -1460,8 +1553,8 @@ SELECT field_name [+|-|*|/|%][Value|field_name] FROM { tb_name | stb_name } [WH **More explanations**: -- Arithmetic operations can be performed on two or more columns, `()` can be used to control the precedence -- NULL doesn't participate the operation, if one of the operands is NULL then result is NULL +- Arithmetic operations can be performed on two or more columns, and parentheses `()` can be used to control the order of precedence. +- NULL doesn't participate in the operation, i.e. if one of the operands is NULL then the result is NULL. **Examples**: @@ -1586,7 +1679,7 @@ Query OK, 6 row(s) in set (0.002613s) ## Time Functions -From version 2.6.0.0, below time related functions can be used in TDengine.
+Since version 2.6.0.0, the time related functions below can be used in TDengine. ### NOW @@ -1757,7 +1850,7 @@ SELECT TO_UNIXTIMESTAMP(datetime_string | ts_col) FROM { tb_name | stb_name } [W **More explanations**: -- The input string must be compatible with ISO8601/RFC3339 standard, 0 will be returned if the string can't be covnerted +- The input string must be compatible with ISO8601/RFC3339 standard; 0 will be returned if the string can't be converted - The precision of the returned timestamp is the same as the precision set for the current database in use **Examples**: @@ -1840,6 +1933,8 @@ SELECT TIMEDIFF(ts_val1 | datetime_string1 | ts_col1, ts_val2 | datetime_string2 1u (microsecond), 1a (millisecond), 1s (second), 1m (minute), 1h (hour), 1d (day). - The precision of the returned timestamp is the same as the precision set for the current database in use +**Applicable versions**: Since version 2.6.0.0 + **Examples**: ```sql diff --git a/docs-en/12-taos-sql/08-interval.md b/docs-en/12-taos-sql/08-interval.md index 7c365fc9a66bff349bc9a13b9954f9c395510bd2..acfb0de0e1521fd8c6a068497a3df7a17941524c 100644 --- a/docs-en/12-taos-sql/08-interval.md +++ b/docs-en/12-taos-sql/08-interval.md @@ -3,36 +3,36 @@ sidebar_label: Interval title: Aggregate by Time Window --- -Aggregate by time window is supported in TDengine. For example, each temperature sensor reports the temperature every second, the average temperature every 10 minutes can be retrieved by query with time window. -Window related clauses are used to divide the data set to be queried into subsets and then aggregate. There are three kinds of windows, time window, status window, and session window. There are two kinds of time windows, sliding window and flip time window. +Aggregation by time window is supported in TDengine. For example, in the case where temperature sensors report the temperature every second, the average temperature for every 10 minutes can be retrieved by performing a query with a time window. +Window related clauses are used to divide the data set to be queried into subsets and then aggregation is performed across the subsets. There are three kinds of windows: time window, status window, and session window. There are two kinds of time windows: sliding window and flip time/tumbling window. ## Time Window -`INTERVAL` claused is used to generate time windows of same time interval, `SLIDING` is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining continuous query both the size of time window and the step of forward sliding time need to be specified. As shown in the figure blow, [t0s, t0e] ,[t1s , t1e], [t2s, t2e] are respectively the time range of three time windows on which continuous queries are executed. The time step for which time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time window. +The `INTERVAL` clause is used to generate time windows of the same time interval. The `SLIDING` parameter is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time.
When defining a continuous query, both the size of the time window and the step of forward sliding time need to be specified. As shown in the figure below, [t0s, t0e], [t1s, t1e], [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. The time step for which time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is the same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time/tumbling window. -![Time Window](/img/sql/timewindow-1.png) +![TDengine Database Time Window](./timewindow-1.webp) -`INTERVAL` and `SLIDING` should be used with aggregate functions and selection functions. Below SQL statement is illegal because no aggregate or selection function is used with `INTERVAL`. +`INTERVAL` and `SLIDING` should be used with aggregate functions and select functions. The SQL statement below is illegal because no aggregate or selection function is used with `INTERVAL`. ``` SELECT * FROM temp_tb_1 INTERVAL(1m); ``` -The time step specified by `SLIDING` can't exceed the time interval specified by `INTERVAL`. Below SQL statement is illegal because the time length specified by `SLIDING` exceeds that specified by `INTERVAL`. +The time step specified by `SLIDING` cannot exceed the time interval specified by `INTERVAL`. The SQL statement below is illegal because the time length specified by `SLIDING` exceeds that specified by `INTERVAL`. ``` SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(2m); ``` -When the time length specified by `SLIDING` is same as that specified by `INTERVAL`, sliding window is actually flip window. The minimum time range specified by `INTERVAL` is 10 milliseconds (10a) prior to version 2.1.5.0. From version 2.1.5.0, the minimum time range by `INTERVAL` can be 1 microsecond (1u). However, if the DB precision is millisecond, the minimum time range is 1 millisecond (1a). Please be noted that the `timezone` parameter should be configured to same value in the `taos.cfg` configuration file on client side and server side. +When the time length specified by `SLIDING` is the same as that specified by `INTERVAL`, the sliding window is actually a flip/tumbling window. The minimum time range specified by `INTERVAL` is 10 milliseconds (10a) prior to version 2.1.5.0. Since version 2.1.5.0, the minimum time range by `INTERVAL` can be 1 microsecond (1u). However, if the DB precision is millisecond, the minimum time range is 1 millisecond (1a). Please note that the `timezone` parameter should be configured to be the same value in the `taos.cfg` configuration file on client side and server side. ## Status Window -In case of using integer, bool, or string to represent the device status at a moment, the continuous rows with same status belong to same status window. Once the status changes, the status window closes. +In case of using integer, bool, or string to represent the status of a device at any given moment, continuous rows with the same status belong to a status window. Once the status changes, the status window closes.
As shown in the following figure, there are two status windows according to status, [2019-04-28 14:22:07,2019-04-28 14:22:10] and [2019-04-28 14:22:11,2019-04-28 14:22:12]. Status window is not applicable to STable for now. -![Status Window](/img/sql/timewindow-3.png) +![TDengine Database Status Window](./timewindow-3.webp) -`STATE_WINDOW` is used to specify the column based on which to define status window, for example: +`STATE_WINDOW` is used to specify the column on which the status window will be based. For example: ``` SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status); @@ -44,17 +44,17 @@ SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status); SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION(ts, tol_val); ``` -The primary key, i.e. timestamp, is used to determine which session window the row belongs to. If the time interval between two adjacent rows is within the time range specified by `tol_val`, they belong to same session window; otherwise they belong to two different time windows. As shown in the figure below, if the limit of time interval for session window is specified as 12 seconds, then the 6 rows in the figure constitutes 2 time windows, [2019-04-28 14:22:10,2019-04-28 14:22:30] and [2019-04-28 14:23:10,2019-04-28 14:23:30], because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds. +The primary key, i.e. timestamp, is used to determine which session window a row belongs to. If the time interval between two adjacent rows is within the time range specified by `tol_val`, they belong to the same session window; otherwise they belong to two different session windows. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitute 2 session windows, [2019-04-28 14:22:10,2019-04-28 14:22:30] and [2019-04-28 14:23:10,2019-04-28 14:23:30], because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds. -![Session Window](/img/sql/timewindow-2.png) +![TDengine Database Session Window](./timewindow-2.webp) -If the time interval between two continuous rows are withint the time interval specified by `tol_value` they belong to the same session window; otherwise a new session window is started automatically. Session window is not supported on STable for now. +If the time interval between two continuous rows is within the time interval specified by `tol_value`, they belong to the same session window; otherwise a new session window is started automatically. Session window is not supported on STable for now. ## More On Window Aggregate ### Syntax -The full syntax of aggregate by window is as following: +The full syntax of aggregate by window is as follows: ```sql SELECT function_list FROM tb_name @@ -73,11 +73,11 @@ SELECT function_list FROM stb_name ### Restrictions -- Aggregate functions and selection functions can be used in `function_list`, with each function having only one output, for example COUNT, AVG, SUM, STDDEV, LEASTSQUARES, PERCENTILE, MIN, MAX, FIRST, LAST. Functions having multiple ouput can't be used, for example DIFF or arithmetic operations. +- Aggregate functions and select functions can be used in `function_list`, with each function having only one output. For example COUNT, AVG, SUM, STDDEV, LEASTSQUARES, PERCENTILE, MIN, MAX, FIRST, LAST.
- `LAST_ROW` can't be used together with window aggregate.
- Scalar functions, like CEIL/FLOOR, can't be used with window aggregate.
- `WHERE` clause can be used to specify the starting and ending time and other filter conditions.
-- `FILL` clause is used to specify how to fill when there is data missing in any window, including: \
+- `FILL` clause is used to specify how to fill when there is data missing in any window, including:
  1. NONE: No fill (the default fill mode)
  2. VALUE: Fill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)`
  3. PREV: Fill with the previous non-NULL value, `FILL(PREV)`
@@ -87,22 +87,23 @@

:::info

-1. Huge volume of interpolation output may be returned using `FILL`, so it's recommended to specify the time range when using `FILL`. The maximum interpolation values that can be returned in single query is 10,000,000.
-2. The result set is in the ascending order of timestamp in aggregate by time window aggregate.
+1. A huge volume of interpolation output may be returned using `FILL`, so it's recommended to specify the time range when using `FILL`. The maximum number of interpolation values that can be returned in a single query is 10,000,000.
+2. The result set is in ascending order of timestamp when you aggregate by time window.
3. If aggregate by window is used on STable, the aggregate function is performed on all the rows matching the filter conditions. If `GROUP BY` is not used in the query, the result set will be returned in ascending order of timestamp; otherwise the result set is not exactly in the order of ascending timestamp in each group.
- :::
+
+:::

Aggregate by time window is also used in continuous query; please refer to [Continuous Query](/develop/continuous-query).

## Examples

-The table of intelligent meters can be created like below SQL statement:
+A table of intelligent meters can be created by the SQL statement below:

```sql
CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
```

-The average current, maximum current and median of current in every 10 minutes of the past 24 hours can be calculated using below SQL statement, with missing value filled with the previous non-NULL value.
+The average current, maximum current and median of current in every 10 minutes for the past 24 hours can be calculated using the SQL statement below, with missing values filled with the previous non-NULL values.

```
SELECT AVG(current), MAX(current), APERCENTILE(current, 50) FROM meters
diff --git a/docs-en/12-taos-sql/09-limit.md b/docs-en/12-taos-sql/09-limit.md
index 873e484fbb4731294d00df323f8e0d2cbc6b1d30..db55cdd69e7bd29ca66ee15b61f28991568d9556 100644
--- a/docs-en/12-taos-sql/09-limit.md
+++ b/docs-en/12-taos-sql/09-limit.md
@@ -4,9 +4,9 @@ title: Limits & Restrictions

## Naming Rules

-1. Only English characters, digits and underscore are allowed
-2. Can't be started with digits
-3. Case Insensitive without escape character "\`"
+1. Only characters from the English alphabet, digits and underscore are allowed
+2. Names cannot start with a digit
+3. Case insensitive without escape character "\`"
4. Identifier with escape character "\`"
   To support more flexible table or column names, a new escape character "\`" is introduced. For more details please refer to [escape](/taos-sql/escape).
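As a hedged illustration of rule 4 (the table and column names here are hypothetical), the escape character allows identifiers that the rules above would otherwise forbid, such as names starting with a digit:

```sql
-- Hypothetical identifiers: without backticks, a name starting with a digit
-- would be rejected; with backticks it is accepted and kept case sensitive.
CREATE TABLE `2022_meters` (ts TIMESTAMP, `Current` FLOAT);
```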
@@ -16,38 +16,38 @@ The legal character set is `[a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/]`.

## General Limits

-- Maximum length of database name is 32 bytes
-- Maximum length of table name is 192 bytes, excluding the database name prefix and the separator
-- Maximum length of each data row is 48K bytes from version 2.1.7.0 , before which the limit is 16K bytes. Please be noted that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type.
-- Maximum of column name is 64.
+- Maximum length of database name is 32 bytes.
+- Maximum length of table name is 192 bytes, excluding the database name prefix and the separator.
+- Maximum length of each data row is 48K bytes since version 2.1.7.0, before which the limit was 16K bytes. Please note that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type.
+- Maximum length of column name is 64.
- Maximum number of columns is 4096. There must be at least 2 columns, and the first column must be timestamp.
- Maximum length of tag name is 64.
- Maximum number of tags is 128. There must be at least 1 tag. The total length of tag values should not exceed 16K bytes.
-- Maximum length of singe SQL statement is 1048576, i.e. 1 MB bytes. It can be configured in the parameter `maxSQLLength` in the client side, the applicable range is [65480, 1048576].
-- At most 4096 columns (or 1024 prior to 2.1.7.0) can be returned by `SELECT`, functions in the query statement may constitute columns. Error will be returned if the limit is exceeded.
-- Maximum numbers of databases, STables, tables are only depending on the system resources.
-- Maximum of database name is 32 bytes, can't include "." and special characters.
-- Maximum replica number of database is 3
-- Maximum length of user name is 23 bytes
-- Maximum length of password is 15 bytes
-- Maximum number of rows depends on the storage space only.
-- Maximum number of tables depends on the number of nodes only.
-- Maximum number of databases depends on the number of nodes only.
-- Maximum number of vnodes for single database is 64.
+- Maximum length of a single SQL statement is 1048576, i.e. 1 MB. It can be configured via the parameter `maxSQLLength` on the client side; the applicable range is [65480, 1048576].
+- At most 4096 columns (or 1024 prior to 2.1.7.0) can be returned by `SELECT`. Functions in the query statement constitute columns. An error is returned if the limit is exceeded.
+- Maximum numbers of databases, STables and tables depend only on the system resources.
+- Maximum length of database name is 32 bytes, and it can't include "." or special characters.
+- Maximum number of replicas for a database is 3.
+- Maximum length of user name is 23 bytes.
+- Maximum length of password is 15 bytes.
+- Maximum number of rows depends only on the storage space.
+- Maximum number of tables depends only on the number of nodes.
+- Maximum number of databases depends only on the number of nodes.
+- Maximum number of vnodes for a single database is 64.

## Restrictions of `GROUP BY`

-`GROUP BY` can be performed on tags and `TBNAME`. It can be performed on data columns too, with one restriction that only one column and the number of unique values on that column is lower than 100,000. Please be noted that `GROUP BY` can't be performed on float or double type.
+`GROUP BY` can be performed on tags and `TBNAME`. It can be performed on data columns too, with the restriction that it can be performed on only one data column, and the number of unique values in that column must be lower than 100,000. Please note that `GROUP BY` cannot be performed on float or double types.
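A short sketch of these rules, using the `meters` STable defined in the examples earlier in this chapter (`location` is a tag):

```sql
-- Legal: GROUP BY on a tag column and on the special TBNAME pseudo column.
SELECT AVG(voltage), location FROM meters GROUP BY location;
SELECT COUNT(*), TBNAME FROM meters GROUP BY TBNAME;
```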
## Restrictions of `IS NOT NULL`

-`IS NOT NULL` can be used on any data type of columns. The non-empty string evaluation expression, i.e. `<\>""` can only be used on non-numeric data types.
+`IS NOT NULL` can be used on columns of any data type. The non-empty string evaluation expression, i.e. `< > ""`, can only be used on non-numeric data types.

## Restrictions of `ORDER BY`

-- Only one `order by` is allowed for normal table and sub table.
+- Only one `order by` is allowed for normal table and subtable.
- At most two `order by` are allowed for STable, and the second one must be `ts`.
-- `order by tag` must be used with `group by tag` on same tag, this rule is also applicable to `tbname`.
+- `order by tag` must be used with `group by tag` on the same tag. This rule is also applicable to `tbname`.
- `order by column` must be used with `group by column` or `top/bottom` on the same column. This rule is applicable to table and STable.
- `order by ts` is applicable to table and STable.
- If `order by ts` is used with `group by`, the result set is sorted using `ts` in each group.
@@ -56,11 +56,11 @@ The legal character set is `[a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/]`.

### Name Restrictions of Table/Column

-The name of a table or column can only be composed of ASCII characters, digits and underscore, while digit can't be used as the beginning. The maximum length is 192 bytes. Names are case insensitive. The name mentioned in this rule doesn't include the database name prefix and the separator.
+The name of a table or column can only be composed of ASCII characters, digits and underscore, and it cannot start with a digit. The maximum length is 192 bytes. Names are case insensitive. The name mentioned in this rule doesn't include the database name prefix and the separator.

### Name Restrictions After Escaping

-To support more flexible table or column names, new escape character "`" is introduced in TDengine to avoid the conflict between table name and keywords and break the above restrictions for table name. The escape character is not counted in the length of table name.
+To support more flexible table or column names, a new escape character "\`" is introduced in TDengine to avoid the conflict between table name and keywords and break the above restrictions for table names. The escape character is not counted in the length of table name.

With escaping, the string inside escape characters is case sensitive, i.e. will not be converted to lower case internally.
diff --git a/docs-en/12-taos-sql/10-json.md b/docs-en/12-taos-sql/10-json.md
index 60468f1e0fd75cc04cae8a91b0a1a22b9bd3600b..7460a5e0ba3ce78ee7744569cda460c477cac19c 100644
--- a/docs-en/12-taos-sql/10-json.md
+++ b/docs-en/12-taos-sql/10-json.md
@@ -4,7 +4,7 @@ title: JSON Type

## Syntax

-1. Tag of JSON type
+1. Tag of type JSON

   ```sql
   create STable s1 (ts timestamp, v1 int) tags (info json);
@@ -12,7 +12,7 @@ title: JSON Type
   create table s1_1 using s1 tags ('{"k1": "v1"}');
   ```

-2. -> Operator of JSON
+2. "->" Operator of JSON

   ```sql
   select * from s1 where info->'k1' = 'v1';
@@ -20,7 +20,7 @@ title: JSON Type
   select info->'k1' from s1;
   ```

-3. contains Operator of JSON
"contains" Operator of JSON ```sql select * from s1 where info contains 'k2'; @@ -30,7 +30,7 @@ title: JSON Type ## Applicable Operations -1. When JSON data type is used in `where`, `match/nmatch/between and/like/and/or/is null/is no null` can be used but `in` can't be used. +1. When a JSON data type is used in `where`, `match/nmatch/between and/like/and/or/is null/is no null` can be used but `in` can't be used. ```sql select * from s1 where info->'k1' match 'v*'; @@ -42,9 +42,9 @@ title: JSON Type select * from s1 where info->'k1' is not null; ``` -2. Tag of JSON type can be used in `group by`, `order by`, `join`, `union all` and sub query, for example `group by json->'key'` +2. A tag of JSON type can be used in `group by`, `order by`, `join`, `union all` and sub query; for example `group by json->'key'` -3. `Distinct` can be used with tag of JSON type +3. `Distinct` can be used with a tag of type JSON ```sql select distinct info->'k1' from s1; @@ -52,29 +52,29 @@ title: JSON Type 4. Tag Operations - The value of JSON tag can be altered. Please be noted that the full JSON will be override when doing this. + The value of a JSON tag can be altered. Please note that the full JSON will be overriden when doing this. - The name of JSON tag can be altered. A tag of JSON type can't be added or removed. The column length of a JSON tag can't be changed. + The name of a JSON tag can be altered. A tag of JSON type can't be added or removed. The column length of a JSON tag can't be changed. ## Other Restrictions -- JSON type can only be used for tag. There can be only one tag of JSON type, and it's exclusive to any other types of tag. +- JSON type can only be used for a tag. There can be only one tag of JSON type, and it's exclusive to any other types of tags. - The maximum length of keys in JSON is 256 bytes, and key must be printable ASCII characters. The maximum total length of a JSON is 4,096 bytes. - JSON format: - - The input string for JSON can be empty, i.e. "", "\t", or NULL, but can't be non-NULL string, bool or array. - - object can be {}, and the whole JSON is empty if so. Key can be "", and it's ignored if so. - - value can be int, double, string, boll or NULL, can't be array. Nesting is not allowed, that means value can't be another JSON. + - The input string for JSON can be empty, i.e. "", "\t", or NULL, but it can't be non-NULL string, bool or array. + - object can be {}, and the entire JSON is empty if so. Key can be "", and it's ignored if so. + - value can be int, double, string, bool or NULL, and it can't be an array. Nesting is not allowed which means that the value of a key can't be JSON. - If one key occurs twice in JSON, only the first one is valid. - Escape characters are not allowed in JSON. -- NULL is returned if querying a key that doesn't exist in JSON. +- NULL is returned when querying a key that doesn't exist in JSON. - If a tag of JSON is the result of inner query, it can't be parsed and queried in the outer query. -For example, below SQL statements are not supported. +For example, the SQL statements below are not supported. 
## Other Restrictions

-- JSON type can only be used for tag. There can be only one tag of JSON type, and it's exclusive to any other types of tag.
+- JSON type can only be used for a tag. There can be only one tag of JSON type, and it cannot be used together with tags of any other type.
- The maximum length of keys in JSON is 256 bytes, and key must be printable ASCII characters. The maximum total length of a JSON is 4,096 bytes.
- JSON format:
-  - The input string for JSON can be empty, i.e. "", "\t", or NULL, but can't be non-NULL string, bool or array.
-  - object can be {}, and the whole JSON is empty if so. Key can be "", and it's ignored if so.
-  - value can be int, double, string, boll or NULL, can't be array. Nesting is not allowed, that means value can't be another JSON.
+  - The input string for JSON can be empty, i.e. "", "\t", or NULL, but it can't be a non-NULL string, bool or array.
+  - object can be {}, and the entire JSON is empty if so. Key can be "", and it's ignored if so.
+  - value can be int, double, string, bool or NULL, and it can't be an array. Nesting is not allowed, which means that the value of a key can't be JSON.
  - If one key occurs twice in JSON, only the first one is valid.
  - Escape characters are not allowed in JSON.
-- NULL is returned if querying a key that doesn't exist in JSON.
+- NULL is returned when querying a key that doesn't exist in JSON.
- If a tag of JSON is the result of an inner query, it can't be parsed and queried in the outer query.

-For example, below SQL statements are not supported.
+For example, the SQL statements below are not supported.

```sql
select jtag->'key' from (select jtag from STable);
diff --git a/docs-en/12-taos-sql/12-keywords.md b/docs-en/12-taos-sql/12-keywords.md
index fa750300b71251e1172dba13f91d05822f9ac1f4..56a82a02a1fada712141f3572b761e0cd18576c6 100644
--- a/docs-en/12-taos-sql/12-keywords.md
+++ b/docs-en/12-taos-sql/12-keywords.md
@@ -46,3 +46,44 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam
| CONNECTIONS | HAVING | NOT | SOFFSET | VNODES |
| CONNS | ID | NOTNULL | STable | WAL |
| COPY | IF | NOW | STableS | WHERE |
+| _C0 | _QSTART | _QSTOP | _QDURATION | _WSTART |
+| _WSTOP | _WDURATION | | | |
+
+## Explanations
+### TBNAME
+`TBNAME` can be considered as a special tag in a STable; it represents the name of a subtable.
+
+Get the table name and tag values of all subtables in a STable:
+```mysql
+SELECT TBNAME, location FROM meters;
+```
+
+Count the number of subtables in a STable:
+```mysql
+SELECT COUNT(TBNAME) FROM meters;
+```
+
+Only filters on TAGS can be used in the WHERE clause of the above two query statements.
+```mysql
+taos> SELECT TBNAME, location FROM meters;
+ tbname | location                |
+==================================================================
+ d1004  | California.SanFrancisco |
+ d1003  | California.SanFrancisco |
+ d1002  | California.LosAngeles   |
+ d1001  | California.LosAngeles   |
+Query OK, 4 row(s) in set (0.000881s)
+
+taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2;
+ count(tbname) |
+========================
+ 2 |
+Query OK, 1 row(s) in set (0.001091s)
+```
+### _QSTART/_QSTOP/_QDURATION
+The start, stop and duration of a query time window (since version 2.6.0.0).
+
+### _WSTART/_WSTOP/_WDURATION
+The start, stop and duration of an aggregate query by time window, such as interval, session window or state window (since version 2.6.0.0).
+
+### _c0
+The first column of a table or STable.
\ No newline at end of file
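A hedged sketch of the window pseudo columns described above (assuming version 2.6.0.0 or above, and reusing the `meters` STable from elsewhere in these docs): the start, stop and duration of each window can be selected alongside an aggregate in a query by time window.

```sql
-- Assumed usage: each output row carries the window start, stop and
-- duration together with the aggregate computed over that 10-minute window.
SELECT _wstart, _wstop, _wduration, COUNT(*) FROM meters INTERVAL(10m);
```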
diff --git a/docs-en/12-taos-sql/_category_.yml b/docs-en/12-taos-sql/_category_.yml
index 0bfd46c860da0afdade1ad12e04f02737c39cedc..74a3b6309e0a4ad35feb674f544c689ae1992299 100644
--- a/docs-en/12-taos-sql/_category_.yml
+++ b/docs-en/12-taos-sql/_category_.yml
@@ -1 +1 @@
-label: SQL
+label: TDengine SQL
diff --git a/docs-en/12-taos-sql/index.md b/docs-en/12-taos-sql/index.md
index 93ba8ff87e8ee7fc6ef90bf369a85f1c8e0d6f9e..33656338a7bba38dc55cf536bdba8e95309c5acf 100644
--- a/docs-en/12-taos-sql/index.md
+++ b/docs-en/12-taos-sql/index.md
@@ -1,13 +1,11 @@
---
-title: TAOS SQL
-description: "The syntax supported by TAOS SQL "
+title: TDengine SQL
+description: "The syntax supported by TDengine SQL "
---

-This document explains the syntax about operating database, table, STable, inserting data, selecting data, functions and some tips that can be used in TAOS SQL. It would be easier to understand with some fundamental knowledge of SQL.
+This section explains the syntax of SQL to perform operations on databases, tables and STables, insert data, select data and use functions. We also provide some tips that can be used in TDengine SQL. If you have previous experience with SQL this section will be fairly easy to understand. If you do not have previous experience with SQL, you'll come to appreciate the simplicity and power of SQL.

-TAOS SQL is the major interface for users to write data into or query from TDengine. For users to easily use, syntax similar to standard SQL is provided. However, please be noted that TAOS SQL is not standard SQL. Besides, because TDengine doesn't provide the functionality of deleting time series data, corresponding statements are not provided in TAOS SQL.
-
-TAOS SQL doesn't support abbreviation for keywords, for example `DESCRIBE` can't be abbreviated as `DESC`.
+TDengine SQL is the major interface for users to write data into or query from TDengine. For ease of use, the syntax is similar to that of standard SQL. However, please note that TDengine SQL is not standard SQL. For instance, TDengine doesn't provide a delete function for time series data and so corresponding statements are not provided in TDengine SQL.

Syntax Specifications used in this chapter:

@@ -16,7 +14,7 @@ Syntax Specifications used in this chapter:
- | means one of a few options, excluding | itself.
- … means the item prior to it can be repeated multiple times.

-To better demonstrate the syntax, usage and rules of TAOS SQL, hereinafter it's assumed that there is a data set of meters. Assuming each meter collects 3 data: current, voltage, phase. The data model is as below:
+To better demonstrate the syntax, usage and rules of TDengine SQL, hereinafter it's assumed that there is a data set from electric meters. Each meter collects 3 data measurements: current, voltage, phase. The data model is shown below:

```sql
taos> DESCRIBE meters;
@@ -30,4 +28,4 @@ taos> DESCRIBE meters;
 groupid | INT | 4 | TAG |
```

-The data set includes the data collected by 4 meters, the corresponding table name is d1001, d1002, d1003, d1004 respectively based on the data model of TDengine.
+The data set includes the data collected by 4 meters; based on the data model of TDengine, the corresponding table names are d1001, d1002, d1003 and d1004.
diff --git a/docs-en/12-taos-sql/timewindow-1.webp b/docs-en/12-taos-sql/timewindow-1.webp
new file mode 100644
index 0000000000000000000000000000000000000000..82747558e96df752a0010d85be79a4af07e4a1df
Binary files /dev/null and b/docs-en/12-taos-sql/timewindow-1.webp differ
diff --git a/docs-en/12-taos-sql/timewindow-2.webp b/docs-en/12-taos-sql/timewindow-2.webp
new file mode 100644
index 0000000000000000000000000000000000000000..8f1314ae34f7f5c5cca1d3cb80455f555fad38c3
Binary files /dev/null and b/docs-en/12-taos-sql/timewindow-2.webp differ
diff --git a/docs-en/12-taos-sql/timewindow-3.webp b/docs-en/12-taos-sql/timewindow-3.webp
new file mode 100644
index 0000000000000000000000000000000000000000..5bd16e68e7fd5da6805551e9765975277cd5d4d9
Binary files /dev/null and b/docs-en/12-taos-sql/timewindow-3.webp differ
diff --git a/docs-en/13-operation/01-pkg-install.md b/docs-en/13-operation/01-pkg-install.md
index 00802506e681a9e27e338fef363e4157379c5a85..c098002962d62aa0acc7a94462c052303cb2ed90 100644
--- a/docs-en/13-operation/01-pkg-install.md
+++ b/docs-en/13-operation/01-pkg-install.md
@@ -6,7 +6,7 @@ description: Install, Uninstall, Start, Stop and Upgrade
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";

-TDengine community version provides dev and rpm package for users to choose based on the system environment. deb supports Debian, Ubuntu and systems derived from them. rpm supports CentOS, RHEL, SUSE and systems derived from them. Furthermore, tar.gz package is provided for enterprise customers.
+TDengine community version provides deb and rpm packages for users to choose from, based on their system environment. The deb package supports Debian, Ubuntu and derivative systems. The rpm package supports CentOS, RHEL, SUSE and derivative systems. Furthermore, a tar.gz package is provided for TDengine Enterprise customers.
## Install

@@ -14,7 +14,7 @@ TDengine community version provides deb and rpm packages for users to choose base

1. Download deb package from official website, for example TDengine-server-2.4.0.7-Linux-x64.deb

-2. In the directory where the package is located, execute below command
+2. In the directory where the package is located, execute the command below

```bash
$ sudo dpkg -i TDengine-server-2.4.0.7-Linux-x64.deb
@@ -46,7 +46,7 @@ TDengine is installed successfully!

1. Download rpm package from official website, for example TDengine-server-2.4.0.7-Linux-x64.rpm;

-2. In the directory where the package is located, execute below command
+2. In the directory where the package is located, execute the command below

```
$ sudo rpm -ivh TDengine-server-2.4.0.7-Linux-x64.rpm
@@ -77,7 +77,7 @@ TDengine is installed successfully!

1. Download the tar.gz package, for example TDengine-server-2.4.0.7-Linux-x64.tar.gz;

- 2、In the directory where the package is located, firstly decompress the file, then switch to the sub-directory generated in decompressing, i.e. "TDengine-enterprise-server-2.4.0.7/" in this example, and execute the `install.sh` script.
+2. In the directory where the package is located, first decompress the file, then switch to the sub-directory generated by decompressing it, i.e. "TDengine-enterprise-server-2.4.0.7/" in this example, and execute the `install.sh` script.

```bash
$ tar xvzf TDengine-enterprise-server-2.4.0.7-Linux-x64.tar.gz
@@ -124,7 +124,7 @@ taoskeeper is installed, enable it by `systemctl enable taoskeeper`
```

:::info
-Some configuration will be prompted for users to provide when install.sh is executing, the interactive mode can be disabled by executing `./install.sh -e no`. `./install -h` can show all parameters and detailed explanation.
+Users will be prompted to enter some configuration information when install.sh is executing. The interactive mode can be disabled by executing `./install.sh -e no`. `./install.sh -h` can show all parameters with detailed explanation.

:::

@@ -132,7 +132,7 @@
:::note

-When installing on the first node in the cluster, when "Enter FQDN:" is prompted, nothing needs to be provided. When installing on following nodes, when "Enter FQDN:" is prompted, the end point of the first dnode in the cluster can be input if it has been already up; or just ignore it and configure later after installation is done.
+When installing on the first node in the cluster, at the "Enter FQDN:" prompt, nothing needs to be provided. When installing on subsequent nodes, at the "Enter FQDN:" prompt, you must enter the end point of the first dnode in the cluster if it is already up. You can also just ignore it and configure it later after installation is finished.

:::

@@ -181,14 +181,14 @@ taosKeeper is removed successfully!

:::note

-- It's strongly suggested not to use multiple kinds of installation packages on single host TDengine
-- After deb package is installed, if the installation directory is removed manually so that uninstall or reinstall can't succeed, it can be resolved by cleaning up TDengine package information as below command and then reinstalling.
+- We strongly recommend not to use multiple kinds of TDengine installation packages on a single host.
+- After deb package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information. You can then reinstall if needed.

  ```bash
  $ sudo rm -f /var/lib/dpkg/info/tdengine*
  ```

-- After rpm package is installed, if the installation directory is removed manually so that uninstall or reinstall can't succeed, it can be resolved by cleaning up TDengine package information as below command and then reinstalling.
+- After rpm package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information. You can then reinstall if needed.

  ```bash
  $ sudo rpm -e --noscripts tdengine
  ```

@@ -219,7 +219,7 @@ lrwxrwxrwx 1 root root 13 Feb 22 09:34 log -> /var/log/taos/

During the installation process:

- Configuration directory, data directory, and log directory are created automatically if they don't exist
-- The default configuration file is located at /etc/taos/taos.cfg, which is a copy of /usr/local/taos/cfg/taos.cfg if not existing
+- The default configuration file is located at /etc/taos/taos.cfg, which is a copy of /usr/local/taos/cfg/taos.cfg
- The default data directory is /var/lib/taos, which is a soft link to /usr/local/taos/data
- The default log directory is /var/log/taos, which is a soft link to /usr/local/taos/log
- The executables at /usr/local/taos/bin are linked to /usr/bin
@@ -228,14 +228,14 @@

:::note

-- When TDengine is uninstalled, the configuration /etc/taos/taos.cfg, data directory /var/lib/taos, log directory /var/log/taos are kept. They can be deleted manually with caution because data can't be recovered once
-- When reinstalling TDengine, if the default configuration file /etc/taos/taos.cfg exists, it will be kept and the configuration file in the installation package will be renamed to taos.cfg.orig and stored at /usr/loca/taos/cfg to be used as configuration sample. Otherwise the configuration file in the installation package will be installed to /etc/taos/taos.cfg and used.
+- When TDengine is uninstalled, the configuration /etc/taos/taos.cfg, data directory /var/lib/taos, log directory /var/log/taos are kept. They can be deleted manually with caution, because data can't be recovered. Please follow your data integrity, security and backup SOPs before deleting any data.
+- When reinstalling TDengine, if the default configuration file /etc/taos/taos.cfg exists, it will be kept and the configuration file in the installation package will be renamed to taos.cfg.orig and stored at /usr/local/taos/cfg to be used as configuration sample. Otherwise the configuration file in the installation package will be installed to /etc/taos/taos.cfg and used.

## Start and Stop

-Linux system services `systemd`, `systemctl` or `service` is used to start, stop and restart TDengine. The server process of TDengine is `taosd`, which is started automatically after the Linux system is started. System operator can use `systemd`, `systemctl` or `service` to start, stop or restart TDengine server.
+Linux system services `systemd`, `systemctl` or `service` are used to start, stop and restart TDengine. The server process of TDengine is `taosd`, which is started automatically after the Linux system is started. System operators can use `systemd`, `systemctl` or `service` to start, stop or restart TDengine server.

-For example, if using `systemctl` , the commands to start, stop, restart and check TDengine server are as below:
+For example, if using `systemctl`, the commands to start, stop, restart and check TDengine server are as follows:

- Start server: `systemctl start taosd`

@@ -263,20 +263,22 @@ Active: inactive (dead)

There are two aspects in upgrade operation: upgrade installation package and upgrade a running server.

-Upgrading package should follow the steps mentioned previously to firstly uninstall old version then install new version.
+To upgrade a package, follow the steps mentioned previously to first uninstall the old version then install the new version.

-Upgrading a running server is much more complex. Firstly please check the version number of old version and new version. The version number of TDengine consists of 4 sections, only the first 3 section match can the old version be upgraded to the new version. The steps of upgrading a running server are as below:
+Upgrading a running server is much more complex. First please check the version number of the old version and the new version. The version number of TDengine consists of 4 sections; only if the first 3 sections match can the old version be upgraded to the new version. The steps of upgrading a running server are as below:

- Stop inserting data
-- Make sure all data persisted into disk
+- Make sure all data is persisted to disk
+- Make some simple queries (such as total rows in STables and tables; note down the values, and follow best practices and relevant SOPs)
- Stop the cluster of TDengine
- Uninstall old version and install new version
- Start the cluster of TDengine
-- Make some simple queries to make sure no data loss
-- Make some simple data insertion to make sure the cluster works well
-- Restore business data
+- Execute simple queries, such as the ones executed prior to installing the new package, to make sure there is no data loss
+- Run some simple data insertion statements to make sure the cluster works well
+- Restore business services

:::warning
+
TDengine doesn't guarantee any lower version is compatible with the data generated by a higher version, so it's never recommended to downgrade the version.
:::
diff --git a/docs-en/13-operation/02-planning.mdx b/docs-en/13-operation/02-planning.mdx
index d0fc4cccbce29e596304787edae84b94056ff75f..c1baf92dbfa8d93f83174c05c2ea631d1a469739 100644
--- a/docs-en/13-operation/02-planning.mdx
+++ b/docs-en/13-operation/02-planning.mdx
@@ -2,19 +2,19 @@
title: Resource Planning
---

-The computing and storage resources need to be planned if using TDengine to build an IoT platform. How to plan the CPU, memory and disk required will be described in this chapter.
+It is important to plan computing and storage resources if using TDengine to build an IoT, time-series or Big Data platform. How to plan the CPU, memory and disk resources required will be described in this chapter.

## Memory Requirement of Server Side

-The number of vgroups created for each database is same as the number of CPU cores by default and can be configured by parameter `maxVgroupsPerDb`, each vnode in a vgroup stores one replica. Each vnode consumes fixed size of memory, i.e. `blocks` \* `cache`. Besides, some memory is required for tag values associated with each table. A fixed amount of memory is required for each cluster. So, the memory required for each DB can be calculated using below formula:
+By default, the number of vgroups created for each database is the same as the number of CPU cores. This can be configured by the parameter `maxVgroupsPerDb`. Each vnode in a vgroup stores one replica. Each vnode consumes a fixed amount of memory, i.e. `blocks` \* `cache`. In addition, some memory is required for tag values associated with each table. A fixed amount of memory is required for each cluster. So, the memory required for each DB can be calculated using the formula below:

```
Database Memory Size = maxVgroupsPerDb * replica * (blocks * cache + 10MB) + numOfTables * (tagSizePerTable + 0.5KB)
```

-For example, assuming the default value of `maxVgroupPerDB` is 64, the default value of `cache` 16M, the default value of `blocks` is 6, there are 100,000 tables in a DB, the replica number is 1, total length of tag values is 256 bytes, the total memory required for this DB is: 并且一个 DB 中有 10 万张表,单副本,标签总长度是 256 字节,则这个 DB 总的内存需求为:64 \* 1 \* (16 \* 6 + 10) + 100000 \* (0.25 + 0.5) / 1000 = 6792M.
+For example, assuming the default value of `maxVgroupsPerDb` is 64, the default value of `cache` is 16M, the default value of `blocks` is 6, there are 100,000 tables in a DB, the replica number is 1, total length of tag values is 256 bytes, the total memory required for this DB is: 64 \* 1 \* (16 \* 6 + 10) + 100000 \* (0.25 + 0.5) / 1000 = 6859M.

-In real operation of TDengine, we are more concerned about the memory used by each TDengine server process `taosd`.在
+In the real operation of TDengine, we are more concerned about the memory used by each TDengine server process `taosd`.

```
taosd_memory = vnode_memory + mnode_memory + query_memory
```

In the above formula:

-1. "vnode_memory" of a `taosd` process is the memory used by all vnodes hosted by this `taosd` process. It can be roughly calculated by firstly adding up the total memory of all DBs whose memory usage can be derived according to the formula mentioned previously then dividing by number of dnodes and multiplying the number of replicas.
+1. "vnode_memory" of a `taosd` process is the memory used by all vnodes hosted by this `taosd` process. It can be roughly calculated by first adding up the total memory of all DBs (whose memory usage can be derived according to the formula for Database Memory Size mentioned above), then dividing by the number of dnodes and multiplying by the number of replicas.
   ```
-   vnode_memory = sum(Database memory) / number_of_dnodes \* replica
+   vnode_memory = (sum(Database Memory Size) / number_of_dnodes) * replica
   ```

2. "mnode_memory" of a `taosd` process is the memory consumed by a mnode. If there is one (and only one) mnode hosted in a `taosd` process, the memory consumed by "mnode" is "0.2KB \* the total number of tables in the cluster".

3. "query_memory" is the memory used when processing query requests. Each ongoing query consumes at least "0.2 KB \* total number of involved tables".

-Please be noted that the above formulas can only be used to estimate the minimum memory requirement, instead of maximum memory usage. In a real production environment, it's better to preserve some redundance beyond the estimated minimum memory requirement. If memory is abundant, it's suggested to increase the value of parameter `blocks` to speed up data insertion and data query.
+Please note that the above formulas can only be used to estimate the minimum memory requirement, instead of maximum memory usage. In a real production environment, it's better to reserve some redundancy beyond the estimated minimum memory requirement. If memory is abundant, it's suggested to increase the value of parameter `blocks` to speed up data insertion and data query.

## Memory Requirement of Client Side

-The client programs use TDengine client driver `taosc` to connect to the server side, there is also memory requirement for a client program.
+For the client programs using TDengine client driver `taosc` to connect to the server side, there is a memory requirement as well.

-The memory consumed by a client program is mainly about the SQL statements for data insertion, caching of table metadata, and some internal use. Assuming maximum number of tables is N (the memory consumed by the metadata of each table is 256 bytes), maximum number of threads for parallel insertion is T, maximum length of a SQL statement is S (normally 1 MB), the memory required by a client program can be estimated using below formula:
+The memory consumed by a client program is mainly about the SQL statements for data insertion, caching of table metadata, and some internal use. Assuming maximum number of tables is N (the memory consumed by the metadata of each table is 256 bytes), maximum number of threads for parallel insertion is T, maximum length of a SQL statement is S (normally 1 MB), the memory required by a client program can be estimated using the formula below:

```
M = (T * S * 3 + (N / 4096) + 100)
```

-For example, if the number of parallel data insertion threads is 100, total number of tables is 10,000,000, then minimum memory requirement of a client program is:
+For example, if the number of parallel data insertion threads is 100, total number of tables is 10,000,000, then the minimum memory requirement of a client program is:

```
100 * 3 + (10000000 / 4096) + 100 = 2841 (MBytes)
@@ -56,10 +56,10 @@

So, at least 3GB needs to be reserved for such a client.

The CPU resources required depend on two aspects:

-- **Data Insertion** Each dnode of TDengine can process at least 10,000 insertion requests in one second, while each insertion request can have multiple rows. The computing resource consumed between inserting 1 row one time and inserting 10 rows one time is very small. So, the more the rows to insert one time, the higher the efficiency. Inserting in bach also exposes requirement for the client side which needs to cache rows and insert in batch once the cached rows reaches a threshold.
-- **Data Query** High efficiency query is provided in TDengine, but it's hard to estimate the CPU resource required because the queries used in different use cases and the frequency of queries vary significantly. It can only be verified with the query statements, query frequency, data size to be queried, etc provided by user.
+- **Data Insertion** Each dnode of TDengine can process at least 10,000 insertion requests in one second, while each insertion request can have multiple rows. The difference in computing resources consumed between inserting 1 row at a time and inserting 10 rows at a time is very small. So, the more rows that can be inserted at one time, the higher the efficiency. Inserting in batch also imposes requirements on the client side, which needs to cache rows and insert them in batch once the number of cached rows reaches a threshold.
+- **Data Query** High efficiency query is provided in TDengine, but it's hard to estimate the CPU resource required because the queries used in different use cases and the frequency of queries vary significantly. It can only be verified with the query statements, query frequency, data size to be queried, and other requirements provided by users.

-In short words, the CPU resource required for data insertion can be estimated but it's hard to do so for query use cases. In real operation, it's suggested to control CPU usage below 50%. If this threshold is exceeded, it's a reminder for system operator to add more nodes in the cluster to expand resources.
+In short, the CPU resource required for data insertion can be estimated but it's hard to do so for query use cases. In real operation, it's suggested to control CPU usage below 50%. If this threshold is exceeded, it's a reminder for the system operator to add more nodes in the cluster to expand resources.

## Disk Requirement

@@ -69,14 +69,14 @@ The compression ratio in TDengine is much higher than that in RDBMS. In most cas

```
Raw DataSize = numOfTables * rowSizePerTable * rowsPerTable
```

-For example, there are 10,000,000 meters, while each meter collects data every 15 minutes and the data size of each collection si 128 bytes, so the raw data size of one year is: 10000000 \* 128 \* 24 \* 60 / 15 \* 365 = 44.8512(TB). Assuming compression ratio is 5, the actual disk size is: 44.851 / 5 = 8.97024(TB).
+For example, there are 10,000,000 meters, while each meter collects data every 15 minutes and the data size of each collection is 128 bytes, so the raw data size of one year is: 10000000 \* 128 \* 24 \* 60 / 15 \* 365 = 44.8512(TB). Assuming compression ratio is 5, the actual disk size is: 44.8512 / 5 = 8.97024(TB).

-Parameter `keep` can be used to set how long the data will be kept on disk. To further reduce storage cost, multiple storage levels can be enabled in TDengine, with the coldest data stored on the cheapest storage device, and this is transparent to application programs.
+Parameter `keep` can be used to set how long the data will be kept on disk. To further reduce storage cost, multiple storage levels can be enabled in TDengine, with the coldest data stored on the cheapest storage device. This is completely transparent to application programs.
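A sketch of setting the retention period when a database is created (the database name and the 365-day value are hypothetical; `keep` is the parameter described above):

```sql
-- Hypothetical example: data older than 365 days is dropped automatically.
CREATE DATABASE power KEEP 365;
```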
-To increase the performance, multiple disks can be setup for parallel data reading or data inserting. Please be noted that expensive disk array is not necessary because replications are used in TDengine to provide high availability.
+To increase performance, multiple disks can be set up for parallel data reading or data inserting. Please note that an expensive disk array is not necessary because replications are used in TDengine to provide high availability.

## Number of Hosts

-A host can be either physical or virtual. The total memory, total CPU, total disk required can be estimated according to the formulas mentioned previously. Then, according to the system resources that a single host can provide, assuming all hosts are same in resources, the number of hosts can be derived easily.
+A host can be either physical or virtual. The total memory, total CPU, total disk required can be estimated according to the formulae mentioned previously. Then, according to the system resources that a single host can provide, assuming all hosts have the same resources, the number of hosts can be derived easily.

**Quick Estimation for CPU, Memory and Disk** Please refer to [Resource Estimate](https://www.taosdata.com/config/config.html).
diff --git a/docs-en/13-operation/03-tolerance.md b/docs-en/13-operation/03-tolerance.md
index 367474cddb7395ea84a4a33623d1643e487f9d09..d4d48d7fcdc2c990b6ea0821e2347c70a809ed79 100644
--- a/docs-en/13-operation/03-tolerance.md
+++ b/docs-en/13-operation/03-tolerance.md
@@ -7,23 +7,26 @@ title: Fault Tolerance & Disaster Recovery

TDengine uses **WAL**, i.e. Write Ahead Log, to achieve fault tolerance and high reliability.

-When a data block is received by TDengine, the original data block is firstly written into WAL. The log in WAL will be deleted only after the data has been written into data files in the database. Data can be recovered from WAL in case the server is stopped abnormally due to any reason and then restarted.
+When a data block is received by TDengine, the original data block is first written into WAL. The log in WAL will be deleted only after the data has been written into data files in the database. Data can be recovered from WAL in case the server is stopped abnormally for any reason and then restarted.

There are 2 configuration parameters related to WAL:

-- walLevel:0:wal is disabled; 1:wal is enabled without fsync; 2:wal is enabled with fsync.
-- fsync:only valid when walLevel is set to 2, it specified the interval of invoking fsync. If set to 0, it means fsync is invoked immediately once WAL is written.
+- walLevel:
+  - 0: wal is disabled
+  - 1: wal is enabled without fsync
+  - 2: wal is enabled with fsync
+- fsync: This parameter is only valid when walLevel is set to 2. It specifies the interval, in milliseconds, of invoking fsync. If set to 0, it means fsync is invoked immediately once WAL is written.
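Besides being set globally in `taos.cfg`, in TDengine 2.x these two parameters are assumed to also be settable per database at creation time; the following is only a sketch under that assumption (the database name and the values are hypothetical):

```sql
-- Assumed per-database settings: WAL enabled with fsync, invoked
-- every 3000 milliseconds.
CREATE DATABASE power WAL 2 FSYNC 3000;
```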
-To achieve absolutely no data loss, walLevel needs to be set to 2 and fsync needs to be set to 1. The penalty is the performance of data ingestion downgrades. However, if the concurrent threads of data insertion on the client side can reach a big enough number, for example 50, the data ingestion performance would be still good enough, our verification shows that the drop is only 30% compared to fsync is set to 3,000 milliseconds.
+To achieve absolutely no data loss, walLevel should be set to 2 and fsync should be set to 1. There is a performance penalty to the data ingestion rate. However, if the concurrent data insertion threads on the client side can reach a big enough number, for example 50, the data ingestion performance will still be good enough. Our verification shows that the drop is only 30% when fsync is set to 3,000 milliseconds.

## Disaster Recovery

-TDengine uses replications to provide high availability and disaster recovery capability.
+TDengine uses replication to provide high availability and disaster recovery capability.

-TDengine cluster is managed by mnode. To make sure the high availability of mnode, multiple replicas can be configured by system parameter `numOfMnodes`. The data replication between mnode replicas is in synchronous way to guarantee the metadata consistency.
+A TDengine cluster is managed by mnode. To ensure the high availability of mnode, multiple replicas can be configured by the system parameter `numOfMnodes`. The data replication between mnode replicas is performed in a synchronous way to guarantee metadata consistency.

-The number of replicas for time series data in TDengine is associated with each database, there can be a lot of databases in a cluster while each database can be configured with a different number of replicas. When creating a database, parameter `replica` is used to configure the number of replications. To achieve high availability, `replica` needs to be higher than 1.
+The number of replicas for time series data in TDengine is associated with each database. There can be many databases in a cluster and each database can be configured with a different number of replicas. When creating a database, parameter `replica` is used to configure the number of replications. To achieve high availability, `replica` needs to be higher than 1.

-The number of dnodes in a TDengine cluster must NOT be lower than the number of replicas for any database, otherwise it would fail when trying to create table.
+The number of dnodes in a TDengine cluster must NOT be lower than the number of replicas for any database, otherwise it would fail when trying to create a table.

-As long as the dnodes of a TDengine cluster are deployed on different physical machines and replica number is set to bigger than 1, high availability can be achieved without any other assistance. If dnodes of TDengine cluster are deployed in geographically different data centers, disaster recovery can be achieved too.
+As long as the dnodes of a TDengine cluster are deployed on different physical machines and the replica number is higher than 1, high availability can be achieved without any other assistance. For disaster recovery, dnodes of a TDengine cluster should be deployed in geographically different data centers.
diff --git a/docs-en/13-operation/06-admin.md b/docs-en/13-operation/06-admin.md
index 1ca0dfeaf4a4b0b4c597e1a5ec6ece20224e2dba..458a91b88c6d8319fe8b84c2b34d8ff968957910 100644
--- a/docs-en/13-operation/06-admin.md
+++ b/docs-en/13-operation/06-admin.md
@@ -2,7 +2,7 @@
title: User Management
---

-System operator can use TDengine CLI `taos` to create or remove user or change password. The SQL command is as low:
+A system operator can use TDengine CLI `taos` to create or remove users or change passwords. The SQL commands are documented below:

## Create User

```sql
CREATE USER <user_name> PASS <'password'>;
```

-When creating a user and specifying the user name and password, password needs to be quoted using single quotes.
+When creating a user and specifying the user name and password, the password needs to be quoted using single quotes.
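A sketch with a hypothetical user name and password, following the syntax above:

```sql
-- The password is wrapped in single quotes, as required above.
CREATE USER alice PASS 'taosdata123';
```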
## Drop User

```sql
DROP USER <user_name>;
```

-Drop a user can only be performed by root.
+Dropping a user can only be performed by root.

## Change Password

```sql
ALTER USER <user_name> PASS <'password'>;
```

-To keep the case of the password when changing password, password needs to be quoted using single quotes.
+To keep the case of the password when changing password, the password needs to be quoted using single quotes.

## Change Privilege

```sql
ALTER USER <user_name> PRIVILEGE <write|read>;
```

The privileges that can be changed to are `read` or `write` without single quotes.

-Note:there is another privilege `super`, which not allowed to be authorized to any user.
+Note: there is another privilege, `super`, which is not allowed to be authorized to any user.

## Show Users

```sql
SHOW USERS;
```

:::note
-In SQL syntax, `< >` means the part that needs to be input by user, excluding the `< >` itself.
+In SQL syntax, `< >` means the part that needs to be input by the user, excluding the `< >` itself.

:::
diff --git a/docs-en/13-operation/07-import.md b/docs-en/13-operation/07-import.md
index 7077dcebf0986aca9ee67eb1e07c239ddbce7a17..8362cec1ab3072866018678b42a679d0c19b49de 100644
--- a/docs-en/13-operation/07-import.md
+++ b/docs-en/13-operation/07-import.md
@@ -2,26 +2,26 @@
title: Data Import
---

-There are multiple ways of importing data provided byTDengine: import with script, import from data file, import using `taosdump`.
+There are multiple ways of importing data provided by TDengine: import with script, import from data file, import using `taosdump`.

## Import Using Script

-TDengine CLI `taos` supports `source <filename>` command for executing the SQL statements in the file in batch. The SQL statements for creating databases, creating tables, and inserting rows can be written in single file with one statement on each line, then the file can be executed using `source` command in TDengine CLI `taos` to execute the SQL statements in order and in batch. In the script file, any line beginning with "#" is treated as comments and ignored silently.
+TDengine CLI `taos` supports the `source <filename>` command for executing the SQL statements in the file in batch. The SQL statements for creating databases, creating tables, and inserting rows can be written in a single file with one statement on each line, then the file can be executed using the `source` command in TDengine CLI `taos` to execute the SQL statements in order and in batch. In the script file, any line beginning with "#" is treated as comments and ignored silently.

## Import from Data File

-In TDengine CLI, data can be imported from a CSV file into an existing table. The data in single CSV must belong to same table and must be consistent with the schema of that table. The SQL statement is as below:也
+In TDengine CLI, data can be imported from a CSV file into an existing table. The data in a single CSV must belong to the same table and must be consistent with the schema of that table. The SQL statement is as below:

```sql
insert into tb1 file 'path/data.csv';
```

:::note
-If there is description in the first line of a CSV file, please remove it before importing. If there is no value for a column, please use `NULL` without quotes.
+If there is a description in the first line of the CSV file, please remove it before importing. If there is no value for a column, please use `NULL` without quotes.

:::

-For example, there is a sub table d1001 whose schema is as below:
+For example, there is a subtable d1001 whose schema is as below:

```sql
taos> DESCRIBE d1001
@@ -49,7 +49,7 @@ The format of the CSV file to be imported, data.csv, is as below:
'2018-10-12 06:38:05.000',18.30000,219,0.31000
```

-Then, below SQL statement can be used to import data from file "data.csv", assuming the file is located under the home directory of current Linux user.
+Then, the below SQL statement can be used to import data from file "data.csv", assuming the file is located under the home directory of the current Linux user.

```sql
taos> insert into d1001 file '~/data.csv';
@@ -58,4 +58,4 @@ Query OK, 9 row(s) affected (0.004763s)

## Import using taosdump

-A convenient tool for importing and exporting data is provided by TDengine, `taosdump`, which can used to export data from one TDengine cluster and import into another one. For the details of using `taosdump` please refer to [Tool for exporting and importing data: taosdump](/reference/taosdump).
+A convenient tool for importing and exporting data is provided by TDengine, `taosdump`, which can be used to export data from one TDengine cluster and import into another one. For the details of using `taosdump` please refer to [Tool for exporting and importing data: taosdump](/reference/taosdump).
diff --git a/docs-en/13-operation/08-export.md b/docs-en/13-operation/08-export.md
index fa9625a7c5f6b0e6706d726bff410cee647286bb..5780de42faeaedbc1c985ad2aa2f52fe56c76971 100644
--- a/docs-en/13-operation/08-export.md
+++ b/docs-en/13-operation/08-export.md
@@ -2,11 +2,13 @@
title: Data Export
---

-There are two ways of exporting data from a TDengine cluster, one is SQL statement in TDengine CLI, the other one is `taosdump`.
+There are two ways of exporting data from a TDengine cluster:
+- Using a SQL statement in TDengine CLI
+- Using the `taosdump` tool

## Export Using SQL

-If you want to export the data of a table or a STable, please execute below SQL statement in TDengine CLI.
+If you want to export the data of a table or a STable, please execute the SQL statement below in the TDengine CLI.

```sql
select * from <tb_name> >> data.csv;
```

@@ -16,4 +18,4 @@ The data of table or STable specified by `tb_name` will be exported into a file

## Export Using taosdump

-With `taosdump`, you can choose to export the data of all databases, a database, a table or a STable, you can also choose export the data within a time range, or even only export the schema definition of a table. For the details of using `taosdump` please refer to [Tool for exporting and importing data: taosdump](/reference/taosdump).
+With `taosdump`, you can choose to export the data of all databases, a database, a table or a STable; you can also choose to export the data within a time range, or even only export the schema definition of a table. For the details of using `taosdump` please refer to [Tool for exporting and importing data: taosdump](/reference/taosdump).
diff --git a/docs-en/13-operation/09-status.md b/docs-en/13-operation/09-status.md
index 3f3c6c9f1e86f9f33bafc7edfd79bebb175871cc..51396524ea281ae665c9fdf61d2e6e6202995537 100644
--- a/docs-en/13-operation/09-status.md
+++ b/docs-en/13-operation/09-status.md
@@ -3,7 +3,7 @@ sidebar_label: Connections & Tasks
title: Manage Connections and Query Tasks
---

-System operator can use TDengine CLI to show the connections, ongoing queries, stream computing, and can close connection or stop ongoing query task or stream computing.
+A system operator can use the TDengine CLI to show connections, ongoing queries, stream computing, and can close connections or stop ongoing query tasks or stream computing.

## Show Connections

```sql
SHOW CONNECTIONS;
```

One column of the output of the above SQL command is "ip:port", which is the end point of the client.

-## Close Connections Forcedly
+## Force Close Connections

```sql
KILL CONNECTION <connection-id>;
```

In the above SQL command, `connection-id` is from the first column of the output
@@ -27,9 +27,9 @@

SHOW QUERIES;
```

-The first column of the output is query ID, which is composed of the corresponding connection ID and the sequence number of the current query task started on this connection, in format of "connection-id:query-no".
+The first column of the output is query ID, which is composed of the corresponding connection ID and the sequence number of the current query task started on this connection. The format is "connection-id:query-no".

-## Close Queries Forcedly
+## Force Close Queries

```sql
KILL QUERY <query-id>;
```

@@ -43,12 +43,12 @@ In the above SQL command, `query-id` is from the first column of the output of `

SHOW STREAMS;
```

-The first column of the output is stream ID, which is composed of the connection ID and the sequence number of the current stream started on this connection, in the format of "connection-id:stream-no".
+The first column of the output is stream ID, which is composed of the connection ID and the sequence number of the current stream started on this connection. The format is "connection-id:stream-no".

-## Close Continuous Query Forcedly
+## Force Close Continuous Query

```sql
KILL STREAM <stream-id>;
```

-The the above SQL command, `stream-id` is from the first column of the output of `SHOW STREAMS`.
+In the above SQL command, `stream-id` is from the first column of the output of `SHOW STREAMS`.
diff --git a/docs-en/13-operation/10-monitor.md b/docs-en/13-operation/10-monitor.md
index bb5d18b3b2fec3cd2a5e4ebc333537806699ce1d..a4679983f2bc77bb4e438f5d43fa1b8beb39b120 100644
--- a/docs-en/13-operation/10-monitor.md
+++ b/docs-en/13-operation/10-monitor.md
@@ -2,19 +2,19 @@
title: TDengine Monitoring
---

-After TDengine is started, a database named `log` for monitoring is created automatically. The information about CPU, memory, disk, bandwidth, number of requests, disk I/O speed, slow query is written into `log` database on the basis of a predefined interval. Besides, some important system operations, like logon, create user, drop database, and alerts and warnings generated in TDengine are written into `log` database too. System operator can view the data in `log` database from TDengine CLI or from a web console.
+After TDengine is started, a database named `log` is created automatically to help with monitoring. Information including CPU, memory and disk usage, bandwidth, number of requests, disk I/O speed and slow queries is written into the `log` database at a predefined interval. Additionally, some important system operations, like logon, create user, drop database, and alerts and warnings generated in TDengine are written into the `log` database too. A system operator can view the data in `log` database from TDengine CLI or from a web console.
Additionally, some important system operations, like login, creating users, and dropping databases, as well as alerts and warnings generated in TDengine, are written into the `log` database too. A system operator can view the data in the `log` database from the TDengine CLI or from a web console. -Collection of the monitoring information is enabled by default, but can be disabled by parameter `monitor` in configuration file. +The collection of monitoring information is enabled by default, but it can be disabled by the `monitor` parameter in the configuration file. ## TDinsight -TDinsight is a total solution which uses the monitor database `log` mentioned previously and Grafana to monitor a TDengine cluster. +TDinsight is a complete solution which uses the monitoring database `log` mentioned previously, and Grafana, to monitor a TDengine cluster. From version 2.3.3.0, more monitoring data has been added in the `log` database. Please refer to [TDinsight Grafana Dashboard](https://grafana.com/grafana/dashboards/15167) to learn more details about using TDinsight to monitor TDengine. -A script `TDinsight.sh` is provided to deploy TDinsight in automatic way. +A script `TDinsight.sh` is provided to deploy TDinsight automatically. -Download `TDinsight.sh` with below command: +Download `TDinsight.sh` with the command below: ```bash wget https://github.com/taosdata/grafanaplugin/raw/master/dashboards/TDinsight.sh @@ -38,7 +38,7 @@ There are two ways to setup Grafana alert notification. sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E ``` -- The AliClund SMS alert built in TDengine data source plugin can be enabled with parameter `-s`, the parameters of this way are as follows: +- The AliCloud SMS alert built into the TDengine data source plugin can be enabled with the parameter `-s`. The parameters for enabling this plugin are listed below: - `-I`: AliCloud SMS Key ID - `-K`: AliCloud SMS Key Secret @@ -47,7 +47,7 @@ There are two ways to setup Grafana alert notification. - `-T`: Input parameters in JSON format for the SMS notification template, for example`{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}` - `-B`: List of mobile numbers to be notified - Below is an example of the full command using this way. + Below is an example of the full command using the AliCloud SMS alert. ```bash sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -s \ @@ -55,6 +55,6 @@ There are two ways to setup Grafana alert notification. -T '{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}' ``` -Launch `TDinsight.sh` as above command and restart Grafana, then open Dashboard `http://localhost:3000/d/tdinsight`. +Launch `TDinsight.sh` with the command above and restart Grafana, then open Dashboard `http://localhost:3000/d/tdinsight`. For more use cases and restrictions please refer to [TDinsight](/reference/tdinsight/). diff --git a/docs-en/13-operation/11-optimize.md b/docs-en/13-operation/11-optimize.md index 7cccfc8b0d51a4bfda9ae4827130a3747f10e649..69f8a49e4cf950fdf1e363e7a51aa9d888e22e04 100755 --- a/docs-en/13-operation/11-optimize.md +++ b/docs-en/13-operation/11-optimize.md @@ -2,19 +2,19 @@ title: Performance Optimization --- -After a TDengine cluster has been running for long enough time, because of updating data, deleting tables and deleting expired data, there may be fragments in data files and query performance may be impacted. To resolve the problem of fragments, from version 2.1.3.0 a new SQL command `COMPACT` can be used to defragment the data files.
+After a TDengine cluster has been running for a long enough time, because of data insertion, table deletion and deletion of expired data, there may be fragments in data files and query performance may be impacted. To resolve the problem of fragments, since version 2.1.3.0 a new SQL command `COMPACT` can be used to defragment data files. ```sql COMPACT VNODES IN (vg_id1, vg_id2, ...) ``` -`COMPACT` can be used to defragment one or more vgroups. The defragmentation work will be put in task queue for scheduling execution by TDengine. `SHOW VGROUPS` command can be used to get the vgroup ids to be used in `COMPACT` command. There is a column `compacting` in the output of `SHOW GROUPS` to indicate the compacting status of the vgroup: 2 means the vgroup is waiting in task queue for compacting, 1 means compacting is in progress, and 0 means the vgroup has nothing to do with compacting. +`COMPACT` can be used to defragment one or more vgroups. The defragmentation work will be scheduled in the task queue for execution by TDengine. The `SHOW VGROUPS` command can be used to get the vgroup ids to be used in the `COMPACT` command. There is a column `compacting` in the output of `SHOW VGROUPS` to indicate the compaction status of the vgroup: 2 means the vgroup is waiting in the task queue for compaction, 1 means compaction is in progress, and 0 means the vgroup has not been scheduled for compaction. -Please be noted that a lot of disk I/O is required for defragementation operation, during which the performance may be impacted significantly for data insertion and query, data insertion may be blocked shortly in extreme cases. +Please note that a lot of disk I/O is required for defragmentation operations. During defragmentation the performance may be impacted significantly for data insertion and query. Data insertion may even be blocked for short periods in extreme cases. ## Optimize Storage Parameters -The data in different use cases may have different characteristics, such as the days to keep, number of replicas, collection interval, record size, number of collection points, compression or not, etc. To achieve best efficiency in storage, the parameters in below table can be used, all of them can be either configured in `taos.cfg` as default configuration or in the command `create database`. For detailed definition of these parameters please refer to [Configuration Parameters](/reference/config/). +The data in different use cases may have different characteristics, such as the days to keep, number of replicas, collection interval, record size, number of collection points, compression or not, etc. To achieve best efficiency in storage, the parameters in the table below can be used. All of them can either be configured in `taos.cfg`, as default parameters, or can be set in the command `create database`. For detailed definition of these parameters please refer to [Configuration Parameters](/reference/config/).
| # | Parameter | Unit | Definition | **Value Range** | **Default Value** | | --- | --------- | ---- | ------------------------------------------------------------------------------ | ----------------------------------------------------------------------------------------------- | ----------------- | @@ -22,24 +22,24 @@ The data in different use cases may have the | 2 | keep | Day | The number of days the data is kept in the database | 1-36500 | 3650 | | 3 | cache | MB | The size of each memory block | 1-128 | 16 | | 4 | blocks | None | The number of memory blocks used by each vnode | 3-10000 | 6 | -| 5 | quorum | None | The number of required confirmation in case of multiple replicas | 1-2 | 1 | +| 5 | quorum | None | The number of required confirmations in case of multiple replicas | 1-2 | 1 | | 6 | minRows | None | The minimum number of rows in a data file | 10-1000 | 100 | -| 7 | maxRows | None | The maximum number of rows in a daa file | 200-10000 | 4096 | +| 7 | maxRows | None | The maximum number of rows in a data file | 200-10000 | 4096 | | 8 | comp | None | Whether to compress the data | 0:uncompressed; 1: One Phase compression; 2: Two Phase compression | 2 | | 9 | walLevel | None | wal sync level (named as "wal" in create database ) | 1:wal enabled without fsync; 2:wal enabled with fsync | 1 | -| 10 | fsync | ms | The time to wait for invoking fsync when walLevel is set to 2; 0 means no wait | 3000 | +| 10 | fsync | ms | The time to wait for invoking fsync when walLevel is set to 2; 0 means no wait | 0-3000 | 3000 | | 11 | replica | none | The number of replications | 1-3 | 1 | | 12 | precision | none | Time precision | ms: millisecond; us: microsecond;ns: nanosecond | ms | -| 13 | update | none | Whether to allow updating data | 0: not allowed; 1: a row must be updated as whole; 2: a part of columns in a row can be updated | 0 | +| 13 | update | none | Whether to allow updating data | 0: not allowed; 1: a whole row must be updated; 2: a portion of columns in a row can be updated | 0 | | 14 | cacheLast | none | Whether the latest data of a table is cached in memory | 0: not cached; 1: the last row is cached; 2: the latest non-NULL value of each column is cached | 0 | -For a specific use case, there may be multiple kinds of data with different characteristics, it's best to put data with same characteristics in same database. So there may be multiple databases in a system while each database can be configured with different storage parameters to achieve best performance. The above parameters can be used when creating a database to override the default setting in configuration file. +Even for a specific use case, there may be multiple kinds of data with different characteristics. In this case it's best to put data with the same characteristics in the same database. There may be multiple databases in a system and each database can be configured with different storage parameters to achieve the best performance. The above parameters can be used when creating a database to override the default setting in the configuration file. ```sql CREATE DATABASE demo DAYS 10 CACHE 32 BLOCKS 8 REPLICA 3 UPDATE 1; ``` -The above SQL statement creates a database named as `demo`, in which each data file stores data across 10 days, the size of each memory block is 32 MB and each vnode is allocated with 8 blocks, the replica is set to 3, update operation is allowed, and all other parameters not specified in the command follow the default configuration in `taos.cfg`.
+The above SQL statement creates a database named `demo`, in which each data file stores 10 days of data, the size of each memory block is 32 MB and 8 blocks are allocated to each vnode, there are 3 replicas and update operations are allowed. All other parameters not specified in the command will default to the values in the configuration file `taos.cfg`. Once a database is created, only some parameters can be changed to take effect immediately, while others can't. @@ -67,7 +67,7 @@ Once a database is created, only some parameters can be changed and be effective **Explanation:** Prior to version 2.1.3.0, the `taosd` server process needs to be restarted for these parameters to take effect if they are changed using `ALTER DATABASE`. -When trying to join a new dnode into a running TDengine cluster, all the parameters related to cluster in the new dnode configuration must be consistent with the cluster, otherwise it can't join the cluster. The parameters that are checked when joining a dnode are as below. For detailed definition of these parameters please refer to [Configuration Parameters](/reference/config/). +When trying to join a new dnode into a running TDengine cluster, all the parameters related to the cluster in the new dnode configuration must be consistent with the cluster, otherwise it can't join the cluster. The parameters that are checked when joining a dnode are listed below. For detailed definition of these parameters please refer to [Configuration Parameters](/reference/config/). - numOfMnodes - mnodeEqualVnodeNum @@ -90,10 +90,10 @@ ALTER DNODE <dnode_id> <config> - dnode_id: from output of "SHOW DNODES" - config: the parameter to be changed, as below - - resetlog: close the old log file and create the new on + - resetlog: close the old log file and create the new one - debugFlag: 131 (INFO/ERROR/WARNING), 135 (DEBUG), 143 (TRACE) -For example +For example: ``` alter dnode 1 debugFlag 135; diff --git a/docs-en/13-operation/17-diagnose.md b/docs-en/13-operation/17-diagnose.md index 590e19cbbcd86e4ab30b251c6961adbdea40203c..2b474fddba4af5ba0c29103cd8ab1249d10d055b 100644 --- a/docs-en/13-operation/17-diagnose.md +++ b/docs-en/13-operation/17-diagnose.md @@ -4,19 +4,19 @@ title: Problem Diagnostics ## Network Connection Diagnostics -When the client is unable to access the server, the network connection between the client side and the server side needs to be checked to find out the root cause and resolve problems. +When a TDengine client is unable to access a TDengine server, the network connection between the client side and the server side must be checked to find the root cause and resolve problems. -The diagnostic for network connection can be executed between Linux and Linux or between Linux and Windows. +Diagnostics for network connections can be executed between Linux and Linux or between Linux and Windows. Diagnostic steps: -1. If the port range to be diagnosed are being occupied by a `taosd` server process, please firstly stop `taosd. -2. On the server side, execute command `taos -n server -P <port> -l <pkgLen>` to monitor the port range starting from the port specified by `-P` parameter with the role of "server. -3. On the client side, execute command `taos -n client -h <fqdn of server> -P <port> -l <pkgLen>` to send testing package to the specified server and port. +1. If the port range to be diagnosed is being occupied by a `taosd` server process, please first stop `taosd`. +2.
On the server side, execute command `taos -n server -P <port> -l <pkgLen>` to monitor the port range starting from the port specified by the `-P` parameter with the role of "server". +3. On the client side, execute command `taos -n client -h <fqdn of server> -P <port> -l <pkgLen>` to send a testing package to the specified server and port. --l <pkgLen>: The size of the testing package, in bytes. The value range is [11, 64,000] and default value is 1,000. Please be noted that the package length must be same in the above 2 commands executed on server side and client side respectively. +-l <pkgLen>: The size of the testing package, in bytes. The value range is [11, 64,000] and the default value is 1,000. Please note that the package length must be the same in the above 2 commands executed on the server side and client side respectively. -Output of the server side is as below for example: +Output of the server side for the example is below: ```bash # taos -n server -P 6000 @@ -47,7 +47,7 @@ Output of the server side is as below for example: 12/21 14:50:22.721261 0x7f53427ec700 UTL UDP: send:1000 bytes to 172.27.0.8 at 6011 ``` -Output of the client side is as below for example: +Output of the client side for the example is below: ```bash # taos -n client -h 172.27.0.7 -P 6000 @@ -65,13 +65,13 @@ Output of the client side is as below for example: 12/21 14:50:22.721274 0x7fc95d859200 UTL successed to test UDP port:6011 ``` -The output needs to be checked carefully for the system operator to find out root cause and solve the problem. +The output needs to be checked carefully for the system operator to find the root cause and resolve the problem. ## Startup Status and RPC Diagnostic -`taos -n startup -h <fqdn>` can be used to check the startup status of a `taosd` process. This is a comman task for a system operator to do to determine whether `taosd` has been started successfully, especially in case of cluster. +`taos -n startup -h <fqdn>` can be used to check the startup status of a `taosd` process. This is a common task which should be performed by a system operator, especially in the case of a cluster, to determine whether `taosd` has been started successfully. -`taos -n rpc -h <fqdn>` can be used to check whether the port of a started `taosd` can be accessed or not. If `taosd` process doesn't respond or work abnormally, this command can be used to initiate a rpc communication with the specified fqdn to determine whether it's network problem or `taosd` is abnormal. +`taos -n rpc -h <fqdn>` can be used to check whether the port of a started `taosd` can be accessed or not. If the `taosd` process doesn't respond or is working abnormally, this command can be used to initiate an RPC communication with the specified FQDN to determine whether it's a network problem or whether `taosd` is abnormal. ## Sync and Arbitrator Diagnostic ```bash taos -n sync -P 6040 -h <fqdn> taos -n sync -P 6042 -h <fqdn> ``` -The above commands can be executed on Linux Shell to check whether the port for sync works well and whether the sync module of the server side works well. Besides, `-P 6042` is used to check whether the arbitrator is configured properly and works well. +The above commands can be executed in a Linux shell to check whether the port for sync is working well and whether the sync module on the server side is working well. Additionally, `-P 6042` is used to check whether the arbitrator is configured properly and is working well.
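To make the sync check concrete, the two commands above can be run against a specific server; `h1.taosdata.com` here is only a placeholder FQDN:

```bash
# Check whether the port used by the sync module on the server side works well
taos -n sync -P 6040 -h h1.taosdata.com

# Check whether the arbitrator is configured properly and works well
taos -n sync -P 6042 -h h1.taosdata.com
```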
## Network Speed Diagnostic `taos -n speed -h <fqdn> -P 6030 -N 10 -l 10000000 -S TCP` -From version 2.2.0.0, the above command can be executed on Linux Shell to test the network speed, it sends uncompressed package to a running `taosd` server process or a simulated server process started by `taos -n server` to test the network speed. Parameters can be used when testing network speed are as below: +From version 2.2.0.0 onwards, the above command can be executed in a Linux shell to test network speed. The command sends uncompressed packages to a running `taosd` server process or a simulated server process started by `taos -n server` to test the network speed. The parameters that can be used when testing network speed are as below: --n:When set to "speed", it means testing network speed --h:The FQDN or IP of the server process to be connected to; if not set, the FQDN configured in `taos.cfg` is used --P:The port of the server process to connect to, the default value is 6030 --N:The number of packages that will be sent in the test, range is [1,10000], default value is 100 --l:The size of each package in bytes, range is [1024, 1024 \* 1024 \* 1024], default value is 1024 --S:The type of network packages to send, can be either TCP or UDP, default value is +-n:When set to "speed", it means testing network speed. +-h:The FQDN or IP of the server process to be connected to; if not set, the FQDN configured in `taos.cfg` is used. +-P:The port of the server process to connect to, the default value is 6030. +-N:The number of packages that will be sent in the test, range is [1,10000], default value is 100. +-l:The size of each package in bytes, range is [1024, 1024 \* 1024 \* 1024], default value is 1024. +-S:The type of network packages to send, can be either TCP or UDP, default value is TCP. ## FQDN Resolution Diagnostic `taos -n fqdn -h <fqdn>` -From version 2.2.0.0, the above command can be executed on Linux Shell to test the resolution speed of FQDN. It can be used to try to resolve a FQDN to an IP address and record the time spent in this process. The parameters that can be used for this purpose are as below: +From version 2.2.0.0 onward, the above command can be executed in a Linux shell to test the resolution speed of FQDN. It can be used to try to resolve an FQDN to an IP address and record the time spent in this process. The parameters that can be used for this purpose are as below: --n:When set to "fqdn", it means testing the speed of resolving FQDN --h:The FQDN to be resolved. If not set, the `FQDN` parameter in `taos.cfg` is used by default. +-n:When set to "fqdn", it means testing the speed of resolving FQDN. +-h:The FQDN to be resolved. If not set, the `FQDN` parameter in `taos.cfg` is used by default. ## Server Log -The parameter `debugFlag` is used to control the log level of `taosd` server process. The default value is 131, for debug purpose it needs to be escalated to 135 or 143. +The parameter `debugFlag` is used to control the log level of the `taosd` server process. The default value is 131. For debugging and tracing, it needs to be set to either 135 or 143 respectively. -Once this parameter is set to 135 or 143, the log file grows very quickly especially when there is huge volume of data insertion and data query requests.
If all the logs are stored together, some important information may be missed very easily, so on server side important information is stored at different place from other logs.一 +Once this parameter is set to 135 or 143, the log file grows very quickly especially when there is a huge volume of data insertion and data query requests. If all the logs are stored together, some important information may be missed very easily. So, on the server side, important information is stored in a different place from other logs. - The log at level of INFO, WARNING and ERROR is stored in `taosinfo` so that it is easy to find important information - The log at level of DEBUG (135) and TRACE (143) and other information not handled by `taosinfo` are stored in `taosdlog` ## Client Log -An independent log file, named as "taoslog+<seq num>" is generated for each client program, i.e. a client process. The default value of `debugfalg` is also 131 and only log at level of INFO/ERROR/WARNING is recorded, it and needs to be changed to 135 or 143 so that log at DEBUG or TRACE level can be recorded for debugging purpose. +An independent log file, named "taoslog+<seq num>", is generated for each client program, i.e. a client process. The default value of `debugFlag` is also 131 and only logs at level of INFO/ERROR/WARNING are recorded. As stated above, for debugging and tracing, it needs to be changed to 135 or 143 respectively, so that logs at DEBUG or TRACE level can be recorded. The maximum length of a single log file is controlled by parameter `numOfLogLines` and only 2 log files are kept for each `taosd` server process. -log file is written in async way to minimize the workload on disk, bu the penalty is that a few log lines may be lost in some extreme conditions. +Log files are written in an async way to minimize the workload on disk, but the trade-off for performance is that a few log lines may be lost in some extreme conditions. diff --git a/docs-en/13-operation/index.md b/docs-en/13-operation/index.md index af3bf646e7aea5650df359d365707ba483ebd171..c64749c40e26f091e4a25e0238827ebceff4b069 100644 --- a/docs-en/13-operation/index.md +++ b/docs-en/13-operation/index.md @@ -2,9 +2,11 @@ title: Administration --- +This chapter is mainly written for system administrators. It covers download, install/uninstall, data import/export, system monitoring, user management, connection management, capacity planning and system optimization. + ```mdx-code-block import DocCardList from '@theme/DocCardList'; import {useCurrentSidebarCategory} from '@docusaurus/theme-common'; -``` \ No newline at end of file +``` diff --git a/docs-en/14-reference/02-rest-api/02-rest-api.mdx b/docs-en/14-reference/02-rest-api/02-rest-api.mdx index 93cec9a256341679b87a5d46fbd8059de2ef3dd4..990af861961e9daf4ac775462e21d6d9852d17c1 100644 --- a/docs-en/14-reference/02-rest-api/02-rest-api.mdx +++ b/docs-en/14-reference/02-rest-api/02-rest-api.mdx @@ -2,23 +2,23 @@ title: REST API --- -To support the development of various types of platforms, TDengine provides an API that conforms to the REST principle, namely REST API. To minimize the learning cost, different from the other database REST APIs, TDengine directly requests the SQL command contained in the request BODY through HTTP POST to operate the database and only requires a URL. +To support the development of various types of applications and platforms, TDengine provides an API that conforms to REST principles, namely the REST API.
To minimize the learning cost, unlike REST APIs for other database engines, TDengine allows insertion of SQL commands in the BODY of an HTTP POST request to operate the database. :::note -One difference from the native connector is that the REST interface is stateless, so the `USE db_name` command has no effect. All references to table names and super table names need to specify the database name prefix. (Since version 2.2.0.0, it is supported to specify db_name in RESTful URL. If the database name prefix is not specified in the SQL command, the `db_name` specified in the URL will be used. Since version 2.4.0.0, REST service is provided by taosAdapter by default. And it requires that the `db_name` must be specified in the URL.) +One difference from the native connector is that the REST interface is stateless and so the `USE db_name` command has no effect. All references to table names and super table names need to specify the database name in the prefix. (Since version 2.2.0.0, TDengine supports specification of the db_name in RESTful URL. If the database name prefix is not specified in the SQL command, the `db_name` specified in the URL will be used. Since version 2.4.0.0, REST service is provided by taosAdapter by default and it requires that the `db_name` must be specified in the URL.) ::: ## Installation -The REST interface does not rely on any TDengine native library, so the client application does not need to install any TDengine libraries. The client application's development language supports the HTTP protocol is enough. +The REST interface does not rely on any TDengine native library, so the client application does not need to install any TDengine libraries. The client application's development language only needs to support the HTTP protocol. ## Verification If the TDengine server is already installed, it can be verified as follows: -The following is an Ubuntu environment using the `curl` tool (to confirm that it is installed) to verify that the REST interface is working. +The following example is in an Ubuntu environment and uses the `curl` tool to verify that the REST interface is working. Note that the `curl` tool may need to be installed in your environment. -The following example lists all databases, replacing `h1.taosdata.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number. +The following example lists all databases on the host h1.taosdata.com. To use it in your environment, replace `h1.taosdata.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number. ```html curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' h1.taosdata.com:6041/rest/sql ``` @@ -89,7 +89,7 @@ For example, `http://h1.taos.com:6041/rest/sql/test` is a URL to `h1.taos.com:60 TDengine supports both Basic authentication and custom authentication mechanisms, and subsequent versions will provide a standard secure digital signature mechanism for authentication. -- The custom authentication information is as follows (Let's introduce token later) +- The custom authentication information is as follows. More details about "token" later. ``` Authorization: Taosd <TOKEN> ``` @@ -136,7 +136,7 @@ The return result is in JSON format, as follows: Description: -- status: tell if the operation result is success or failure. +- status: tells you whether the operation result is a success or a failure. - head: the definition of the table, or just one column "affected_rows" if no result set is returned.
(As of version 2.0.17.0, it is recommended not to rely on the head return value to determine the data column type but rather use column_meta. In later versions, the head item may be removed from the return value.) - column_meta: this item is added to the return value to indicate the data type of each column in the data with version 2.0.17.0 and later versions. Each column is described by three values: column name, column type, and type length. For example, `["current",6,4]` means that the column name is "current", the column type is 6, which is the float type, and the type length is 4, which is the float type with 4 bytes. If the column type is binary or nchar, the type length indicates the maximum length of content stored in the column, not the length of the specific data in this return value. When the column type is nchar, the type length indicates the number of Unicode characters that can be saved, not bytes. - data: The exact data returned, presented row by row, or just [[affected_rows]] if no result set is returned. The order of the data columns in each row of data is the same as that of the data columns described in column_meta. @@ -271,7 +271,7 @@ When the HTTP request URL uses `/rest/sqlutc`, the timestamp of the returned res curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.t1' 192.168.0.1:6041/rest/sqlutc ``` -Respones body: +Response body: ```json { diff --git a/docs-en/14-reference/03-connector/03-connector.mdx b/docs-en/14-reference/03-connector/03-connector.mdx index b4bb5ea1745efa415c8b75f0781ecf77c8d2e236..44685579005c2cebd5e0194a10d457cd1199051e 100644 --- a/docs-en/14-reference/03-connector/03-connector.mdx +++ b/docs-en/14-reference/03-connector/03-connector.mdx @@ -4,7 +4,7 @@ title: Connector TDengine provides a rich set of APIs (application development interface). To facilitate users to develop their applications quickly, TDengine supports connectors for multiple programming languages, including official connectors for C/C++, Java, Python, Go, Node.js, C#, and Rust. These connectors support connecting to TDengine clusters using both native interfaces (taosc) and REST interfaces (not supported in a few languages yet). Community developers have also contributed several unofficial connectors, such as the ADO.NET connector, the Lua connector, and the PHP connector. -![image-connector](/img/connector.png) +![TDengine Database image-connector](./connector.webp) ## Supported platforms @@ -46,7 +46,7 @@ Comparing the connector support for TDengine functional features as follows. 
| -------------- | -------- | ---------- | ------ | ------ | ----------- | -------- | | **Connection Management** | Support | Support | Support | Support | Support | Support | | **Regular Query** | Support | Support | Support | Support | Support | Support | -| **Continous Query** | Support | Support | Support | Support | Support | Support | +| **Continuous Query** | Support | Support | Support | Support | Support | Support | | **Parameter Binding** | Support | Support | Support | Support | Support | Support | | **Subscription** | Support | Support | Support | Support | Support | Not Supported | | **Schemaless** | Support | Support | Support | Support | Support | Support | diff --git a/docs-en/14-reference/03-connector/_preparition.mdx b/docs-en/14-reference/03-connector/_preparation.mdx similarity index 80% rename from docs-en/14-reference/03-connector/_preparition.mdx rename to docs-en/14-reference/03-connector/_preparation.mdx index 906fd3b66cd8743cfbe9481ed5c4b14d16dba070..07ebdbca3d891ff51a254bc1b83016f1404bb47e 100644 --- a/docs-en/14-reference/03-connector/_preparition.mdx +++ b/docs-en/14-reference/03-connector/_preparation.mdx @@ -2,7 +2,7 @@ :::info -Since the TDengine client driver is written in C, using the native connection requires loading the client driver shared library file, which is usually included in the TDengine installer. You can install either standard TDengine server installation package or [TDengine client installtion package](/get-started/). For Windows development, you need to install the corresponding [Windows client](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client) for TDengine. +Since the TDengine client driver is written in C, using the native connection requires loading the client driver shared library file, which is usually included in the TDengine installer. You can install either standard TDengine server installation package or [TDengine client installation package](/get-started/). For Windows development, you need to install the corresponding [Windows client](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client) for TDengine. - libtaos.so: After successful installation of TDengine on a Linux system, the dependent Linux version of the client driver `libtaos.so` file will be automatically linked to `/usr/lib/libtaos.so`, which is included in the Linux scannable path and does not need to be specified separately. - taos.dll: After installing the client on Windows, the dependent Windows version of the client driver taos.dll file will be automatically copied to the system default search path C:/Windows/System32, again without the need to specify it separately. diff --git a/docs-en/14-reference/03-connector/_windows_install.mdx b/docs-en/14-reference/03-connector/_windows_install.mdx index c050509ed5b29d55c2fefca0cba68e7784498642..2819be615ee0a80da9f0324d8d41e9b247e8a7f6 100644 --- a/docs-en/14-reference/03-connector/_windows_install.mdx +++ b/docs-en/14-reference/03-connector/_windows_install.mdx @@ -25,7 +25,7 @@ import PkgList from "/components/PkgList"; :::tip -1. If you use FQDN to connect to the server, you must ensure the local network environment DNS is configured, or add FQDN addressing records in the `hosts` file, e.g., edit C:\Windows\system32\drivers\etc\hosts and add a record like the following: `192.168.1.99 h1.tados.com`.. +1. 
If you use FQDN to connect to the server, you must ensure the local network environment DNS is configured, or add FQDN addressing records in the `hosts` file, e.g., edit C:\Windows\system32\drivers\etc\hosts and add a record like the following: `192.168.1.99 h1.taosd.com`. 2. Uninstall: Run unins000.exe to uninstall the TDengine client driver. ::: diff --git a/docs-en/14-reference/03-connector/connector.webp b/docs-en/14-reference/03-connector/connector.webp new file mode 100644 index 0000000000000000000000000000000000000000..040cf5c26c726b345b2e0e5363dd3c677bec61be Binary files /dev/null and b/docs-en/14-reference/03-connector/connector.webp differ diff --git a/docs-en/14-reference/03-connector/cpp.mdx b/docs-en/14-reference/03-connector/cpp.mdx index 3a934bda51277582a0df931dc7643516156b4390..d13a74384ccc99b4200f89cdba98e5ba902e41f8 100644 --- a/docs-en/14-reference/03-connector/cpp.mdx +++ b/docs-en/14-reference/03-connector/cpp.mdx @@ -4,7 +4,7 @@ sidebar_label: C/C++ title: C/C++ Connector --- -C/C++ developers can use TDengine's client driver and the C/C++ connector, to develop their applications to connect to TDengine clusters for data writing, querying, and other functions. To use it, you need to include the TDengine header file _taos.h_, which lists the function prototypes of the provided APIs; the application also needs to link to the corresponding dynamic libraries on the platform where it is located. +C/C++ developers can use TDengine's client driver and the C/C++ connector to develop their applications to connect to TDengine clusters for data writing, querying, and other functions. To use the C/C++ connector you must include the TDengine header file _taos.h_, which lists the function prototypes of the provided APIs. The application also needs to link to the corresponding dynamic libraries on the platform where it is located. ```c #include <taos.h> @@ -26,7 +26,7 @@ Please refer to [list of supported platforms](/reference/connector#supported-pla ## Supported versions -The version number of the TDengine client driver and the version number of the TDengine server require one-to-one correspondence and recommend using the same version of client driver as what the TDengine server version is. Although a lower version of the client driver is compatible to work with a higher version of the server, if the first three version numbers are the same (i.e., only the fourth version number is different), but it is not recommended. It is strongly discouraged to use a higher version of the client driver to access a lower version of the TDengine server. +The version number of the TDengine client driver and the version number of the TDengine server should be the same. A lower version of the client driver is compatible with a higher version of the server, if the first three version numbers are the same (i.e., only the fourth version number is different). For example, if the client version is x.y.z.1 and the server version is x.y.z.2, the client and server are compatible. But in general we do not recommend using a lower client version with a newer server version. It is also strongly discouraged to use a higher version of the client driver to access a lower version of the TDengine server.
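As a quick runtime check of this version pairing, the client and server version strings can be printed with `taos_get_client_info()` and `taos_get_server_info()` from the C API. This is a minimal sketch; the host and credentials are placeholders:

```c
#include <stdio.h>
#include <taos.h>

int main() {
  // The client driver version is available before any connection is made.
  printf("client version: %s\n", taos_get_client_info());

  // Placeholder connection parameters; adjust them for your deployment.
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  if (conn == NULL) {
    printf("failed to connect to server\n");
    return 1;
  }
  printf("server version: %s\n", taos_get_server_info(conn));

  taos_close(conn);
  taos_cleanup();
  return 0;
}
```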
## Installation steps @@ -55,7 +55,7 @@ In the above example code, `taos_connect()` establishes a connection to port 603 :::note -- If not specified, when the return value of the API is an integer, _0_ means success, the others are error codes representing the reason for failure, and when the return value is a pointer, _NULL_ means failure. +- If not specified, when the return value of the API is an integer, _0_ means success. All others are error codes representing the reason for failure. When the return value is a pointer, _NULL_ means failure. - All error codes and their corresponding causes are described in the `taoserror.h` file. ::: @@ -140,13 +140,12 @@ The base API is used to do things like create database connections and provide a - `void taos_cleanup()` - Clean up the runtime environment and should be called before the application exits. + Cleans up the runtime environment and should be called before the application exits. - ` int taos_options(TSDB_OPTION option, const void * arg, ...) ` Set client options, currently supports region setting (`TSDB_OPTION_LOCALE`), character set -(`TSDB_OPTION_CHARSET`), time zone -(`TSDB_OPTION_TIMEZONE`), configuration file path (`TSDB_OPTION_CONFIGDIR`) . The region setting, character set, and time zone default to the current settings of the operating system. +(`TSDB_OPTION_CHARSET`), time zone (`TSDB_OPTION_TIMEZONE`), configuration file path (`TSDB_OPTION_CONFIGDIR`). The region setting, character set, and time zone default to the current settings of the operating system. - `char *taos_get_client_info()` @@ -159,8 +158,8 @@ The base API is used to do things like create database connections and provide a - host: FQDN of any node in the TDengine cluster - user: user name - pass: password - - db: database name, if the user does not provide, it can also be connected correctly, the user can create a new database through this connection, if the user provides the database name, it means that the database user has already created, the default use of the database - - port: the port the tasd program is listening on + - db: the database name. Even if the user does not provide this, the connection will still work correctly. The user can create a new database through this connection. If the user provides the database name, it means that the database has already been created and the connection can be used for regular operations on the database. + - port: the port the taosd program is listening on NULL indicates a failure. The application needs to save the returned parameters for subsequent use. @@ -187,7 +186,7 @@ The APIs described in this subsection are all synchronous interfaces. After bein - `TAOS_RES* taos_query(TAOS *taos, const char *sql)` - Executes an SQL command, either a DQL, DML, or DDL statement. The `taos` parameter is a handle obtained with `taos_connect()`. You can't tell if the result failed by whether the return value is `NULL`, but by parsing the error code in the result set with the `taos_errno()` function. + Executes an SQL command, either a DQL, DML, or DDL statement. The `taos` parameter is a handle obtained with `taos_connect()`. If the return value is `NULL` this does not necessarily indicate a failure. You can get the error code, if any, by parsing the error code in the result set with the `taos_errno()` function. - `int taos_result_precision(TAOS_RES *res)` @@ -215,7 +214,7 @@ The APIs described in this subsection are all synchronous interfaces. 
After bein - ` TAOS_FIELD *taos_fetch_fields(TAOS_RES *res)` - Gets the properties of each column of the query result set (column name, column data type, column length), used in conjunction with `taos_num_fileds()` to parse a tuple (one row) of data returned by `taos_fetch_row()`. The structure of `TAOS_FIELD` is as follows. + Gets the properties of each column of the query result set (column name, column data type, column length), used in conjunction with `taos_num_fields()` to parse a tuple (one row) of data returned by `taos_fetch_row()`. The structure of `TAOS_FIELD` is as follows. ```c typedef struct taosField { @@ -231,7 +230,7 @@ typedef struct taosField { - ` void taos_free_result(TAOS_RES *res)` - Frees the query result set and the associated resources. Be sure to call this API to free the resources after the query is completed. Otherwise, it may lead to a memory leak in the application. However, note that the application will crash if you call a function like `taos_consume()` to get the query results after freeing the resources. + Frees the query result set and the associated resources. Be sure to call this API to free the resources after the query is completed. Failing to call this may lead to a memory leak in the application. However, note that the application will crash if you call a function like `taos_consume()` to get the query results after freeing the resources. - `char *taos_errstr(TAOS_RES *res)` @@ -242,7 +241,7 @@ typedef struct taosField { Get the reason for the last API call failure. The return value is the error code. :::note -TDengine version 2.0 and above recommends that each thread of a database application create a separate connection or a connection pool based on threads. It is not recommended to pass the connection (TAOS\*) structure to different threads for shared use in the application. Queries, writes, etc., issued based on TAOS structures are multi-thread safe, but state quantities such as "USE statement" may interfere between threads. In addition, the C connector can dynamically create new database-oriented connections on demand (this procedure is not visible to the user), and it is recommended that `taos_close()` be called only at the final exit of the program to close the connection. +TDengine version 2.0 and above recommends that each thread of a database application create a separate connection or a connection pool based on threads. It is not recommended to pass the connection (TAOS\*) structure to different threads for shared use in the application. Queries, writes, and other operations that are issued based on TAOS structures are multi-thread safe, but state quantities such as the "USE statement" may interfere between threads. In addition, the C connector can dynamically create new database-oriented connections on demand (this procedure is not visible to the user), and it is recommended that `taos_close()` be called only at the final exit of the program to close the connection. ::: @@ -274,12 +273,12 @@ All TDengine's asynchronous APIs use a non-blocking call pattern. Applications c ### Parameter Binding API -In addition to direct calls to `taos_query()` to perform queries, TDengine also provides a set of `bind` APIs that supports parameter binding, similar in style to MySQL, and currently only supports using a question mark `? ` to represent the parameter to be bound. +In addition to direct calls to `taos_query()` to perform queries, TDengine also provides a set of `bind` APIs that supports parameter binding, similar in style to MySQL.
TDengine currently only supports using a question mark `?` to represent the parameter to be bound. -Starting with versions 2.1.1.0 and 2.1.2.0, TDengine has significantly improved the bind APIs to support for data writing (INSERT) scenarios. This avoids the resource consumption of SQL syntax parsing when writing data through the parameter binding interface, thus significantly improving write performance in most cases. A typical operation, in this case, is as follows. +Starting with versions 2.1.1.0 and 2.1.2.0, TDengine has significantly improved the bind APIs to support data writing (INSERT) scenarios. This avoids the resource consumption of SQL syntax parsing when writing data through the parameter binding interface, thus significantly improving write performance in most cases. A typical operation, in this case, is as follows. 1. call `taos_stmt_init()` to create the parameter binding object. -2. call `taos_stmt_prepare()` to parse the INSERT statement. 3. +2. call `taos_stmt_prepare()` to parse the INSERT statement. 3. call `taos_stmt_set_tbname()` to set the table name if it is reserved in the INSERT statement but not the TAGS. 4. call `taos_stmt_set_tbname_tags()` to set the table name and TAGS values if the table name and TAGS are reserved in the INSERT statement (for example, if the INSERT statement takes an automatic table build). 5. call `taos_stmt_bind_param_batch()` to set the value of VALUES in multiple columns, or call `taos_stmt_bind_param()` to set the value of VALUES in a single row. @@ -383,7 +382,7 @@ In addition to writing data using the SQL method or the parameter binding API, w **return value** TAOS_RES structure, application can get error message by using `taos_errstr()` and also error code by using `taos_errno()`. In some cases, the returned TAOS_RES is `NULL`, and it is still possible to call `taos_errno()` to safely get the error code information. - The returned TAOS_RES needs to be freed by the caller. Otherwise, a memory leak will occur. + The returned TAOS_RES needs to be freed by the caller in order to avoid memory leaks. **Description** The protocol type is enumerated and contains the following three formats. @@ -416,13 +415,13 @@ The Subscription API currently supports subscribing to one or more tables and co This function is responsible for starting the subscription service, returning the subscription object on success and `NULL` on failure, with the following parameters. - - taos: the database connection that has been established - - restart: if the subscription already exists, whether to restart or continue the previous subscription - - topic: the topic of the subscription (i.e., the name). This parameter is the unique identifier of the subscription - - sql: the query statement of the subscription, this statement can only be _select_ statement, only the original data should be queried, only the data can be queried in time order - - fp: the callback function when the query result is received (the function prototype will be introduced later), only used when called asynchronously. This parameter should be passed `NULL` when called synchronously - - param: additional parameter when calling the callback function, the system API will pass it to the callback function as it is, without any processing - - interval: polling period in milliseconds. The callback function will be called periodically according to this parameter when called asynchronously. not recommended to set this parameter too small To avoid impact on system performance when called synchronously.
If the interval between two calls to `taos_consume()` is less than this period, the API will block until the interval exceeds this period. + - taos: the database connection that has been established. + - restart: if the subscription already exists, whether to restart or continue the previous subscription. + - topic: the topic of the subscription (i.e., the name). This parameter is the unique identifier of the subscription. + - sql: the query statement of the subscription which can only be a _select_ statement. Only the original data should be queried, and data can only be queried in temporal order. + - fp: the callback function invoked when the query result is received; only used when called asynchronously. This parameter should be passed `NULL` when called synchronously. The function prototype is described below. + - param: additional parameter when calling the callback function. The system API will pass it to the callback function as is, without any processing. + - interval: polling period in milliseconds. The callback function will be called periodically according to this parameter when called asynchronously. The interval should not be too small to avoid impact on system performance when called synchronously. If the interval between two calls to `taos_consume()` is less than this period, the API will block until the interval exceeds this period. - ` typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code)` diff --git a/docs-en/14-reference/03-connector/csharp.mdx b/docs-en/14-reference/03-connector/csharp.mdx index 99e185c86765bd0f3cf3a6b32e91403bedec4152..5eb322cf9125fe036349de22ceea5988de46e404 100644 --- a/docs-en/14-reference/03-connector/csharp.mdx +++ b/docs-en/14-reference/03-connector/csharp.mdx @@ -8,18 +8,18 @@ title: C# Connector import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -import Preparition from "./_preparition.mdx" -import CSInsert from "../../04-develop/03-insert-data/_cs_sql.mdx" -import CSInfluxLine from "../../04-develop/03-insert-data/_cs_line.mdx" -import CSOpenTSDBTelnet from "../../04-develop/03-insert-data/_cs_opts_telnet.mdx" -import CSOpenTSDBJson from "../../04-develop/03-insert-data/_cs_opts_json.mdx" -import CSQuery from "../../04-develop/04-query-data/_cs.mdx" -import CSAsyncQuery from "../../04-develop/04-query-data/_cs_async.mdx" +import Preparation from "./_preparation.mdx" +import CSInsert from "../../07-develop/03-insert-data/_cs_sql.mdx" +import CSInfluxLine from "../../07-develop/03-insert-data/_cs_line.mdx" +import CSOpenTSDBTelnet from "../../07-develop/03-insert-data/_cs_opts_telnet.mdx" +import CSOpenTSDBJson from "../../07-develop/03-insert-data/_cs_opts_json.mdx" +import CSQuery from "../../07-develop/04-query-data/_cs.mdx" +import CSAsyncQuery from "../../07-develop/04-query-data/_cs_async.mdx" `TDengine.Connector` is a C# language connector provided by TDengine that allows C# developers to develop C# applications that access TDengine cluster data. -The `TDengine.Connector` connector supports connect to TDengine instances via the TDengine client driver (taosc), providing data writing, querying, subscription, schemaless writing, bind interface, etc. The `TDengine.Connector` currently does not provide a REST connection interface. Developers can write their RESTful application by referring to the [RESTful APIs](https://docs.taosdata.com//reference/restful-api/) documentation.
+The `TDengine.Connector` connector supports connecting to TDengine instances via the TDengine client driver (taosc), providing data writing, querying, subscription, schemaless writing, bind interface, etc. The `TDengine.Connector` currently does not provide a REST connection interface. Developers can write their RESTful application by referring to the [REST API](/reference/rest-api/) documentation. This article describes how to install `TDengine.Connector` in a Linux or Windows environment and connect to TDengine clusters via `TDengine.Connector` to perform basic operations such as data writing and querying. @@ -35,7 +35,7 @@ Please refer to [version support list](/reference/connector#version-support) ## Supported features -1. Connection Mmanagement +1. Connection Management 2. General Query 3. Continuous Query 4. Parameter Binding @@ -179,11 +179,11 @@ namespace TDengineExample 1. "Unable to establish connection", "Unable to resolve FQDN" - Usually, it cause by the FQDN configuration is incorrect, you can refer to [How to understand TDengine's FQDN (Chinese)](https://www.taosdata.com/blog/2021/07/29/2741.html) to solve it. 2. + Usually, it's caused by an incorrect FQDN configuration. Please refer to this section in the [FAQ](https://docs.tdengine.com/2.4/train-faq/faq/#2-how-to-handle-unable-to-establish-connection) to troubleshoot. -Unhandled exception. System.DllNotFoundException: Unable to load DLL 'taos' or one of its dependencies: The specified module cannot be found. +2. Unhandled exception. System.DllNotFoundException: Unable to load DLL 'taos' or one of its dependencies: The specified module cannot be found. - This is usually because the program did not find the dependent client driver. The solution is to copy `C:\TDengine\driver\taos.dll` to the `C:\Windows\System32\` directory on Windows, and create the following softlink on Linux `ln -s /usr/local/taos/driver/libtaos.so.x.x .x.x /usr/lib/libtaos.so` will work. + This is usually because the program did not find the dependent client driver. The solution is to copy `C:\TDengine\driver\taos.dll` to the `C:\Windows\System32\` directory on Windows, and to create the following soft link on Linux: `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so`.
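Before consulting the API reference, a minimal connection sketch may be useful for checking that the client driver can be found at all. The host and credentials below are placeholders, and the calls follow the `TDengineDriver` examples shipped with the connector:

```csharp
using System;
using TDengineDriver;

class ConnectCheck
{
    static void Main()
    {
        TDengine.Init();
        // Placeholder connection parameters; adjust them for your deployment.
        IntPtr conn = TDengine.Connect("localhost", "root", "taosdata", "", 6030);
        if (conn == IntPtr.Zero)
        {
            Console.WriteLine("Connect failed: check the FQDN configuration and the client driver");
            return;
        }
        Console.WriteLine("Connected successfully");
        TDengine.Close(conn);
        TDengine.Cleanup();
    }
}
```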
## API Reference diff --git a/docs-en/14-reference/03-connector/go.mdx b/docs-en/14-reference/03-connector/go.mdx index 5d6a08e6520879fd766952fd0a6cab13416615fe..c1e85ae4eb1d1d7ccfb70b2b4f38cebaf6cbf06c 100644 --- a/docs-en/14-reference/03-connector/go.mdx +++ b/docs-en/14-reference/03-connector/go.mdx @@ -8,16 +8,16 @@ title: TDengine Go Connector import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -import Preparition from "./_preparition.mdx" -import GoInsert from "../../04-develop/03-insert-data/_go_sql.mdx" -import GoInfluxLine from "../../04-develop/03-insert-data/_go_line.mdx" -import GoOpenTSDBTelnet from "../../04-develop/03-insert-data/_go_opts_telnet.mdx" -import GoOpenTSDBJson from "../../04-develop/03-insert-data/_go_opts_json.mdx" -import GoQuery from "../../04-develop/04-query-data/_go.mdx" +import Preparation from "./_preparation.mdx" +import GoInsert from "../../07-develop/03-insert-data/_go_sql.mdx" +import GoInfluxLine from "../../07-develop/03-insert-data/_go_line.mdx" +import GoOpenTSDBTelnet from "../../07-develop/03-insert-data/_go_opts_telnet.mdx" +import GoOpenTSDBJson from "../../07-develop/03-insert-data/_go_opts_json.mdx" +import GoQuery from "../../07-develop/04-query-data/_go.mdx" -`driver-go` is the official Go language connector for TDengine, which implements the interface to the Go language [database/sql](https://golang.org/pkg/database/sql/) package. Go developers can use it to develop applications that access TDengine cluster data. +`driver-go` is the official Go language connector for TDengine. It implements the [database/sql](https://golang.org/pkg/database/sql/) package, the generic Go language interface to SQL databases. Go developers can use it to develop applications that access TDengine cluster data. -`driver-go` provides two ways to establish connections. One is **native connection**, which connects to TDengine instances natively through the TDengine client driver (taosc), supporting data writing, querying, subscriptions, schemaless writing, and bind interface. The other is the **REST connection**, which connects to TDengine instances via the REST interface provided by taosAdapter. The set of features implemented by the REST connection differs slightly from the native connection. +`driver-go` provides two ways to establish connections. One is **native connection**, which connects to TDengine instances natively through the TDengine client driver (taosc), supporting data writing, querying, subscriptions, schemaless writing, and bind interface. The other is the **REST connection**, which connects to TDengine instances via the REST interface provided by taosAdapter. The set of features implemented by the REST connection differs slightly from those implemented by the native connection. This article describes how to install `driver-go` and connect to TDengine clusters and perform basic operations such as data query and data writing through `driver-go`. @@ -213,7 +213,7 @@ func main() { Since the REST interface is stateless, the `use db` syntax will not work. You need to put the db name into the SQL command, e.g. `create table if not exists tb1 (ts timestamp, a int)` to `create table if not exists test.tb1 (ts timestamp, a int)` otherwise it will report the error `[0x217] Database not specified or available`. -You can also put the db name in the DSN by changing `root:taosdata@http(localhost:6041)/` to `root:taosdata@http(localhost:6041)/test`. This method is supported by taosAdapter in TDengine 2.4.0.5. 
is supported since TDengine 2.4.0.5. Executing the `create database` statement when the specified db does not exist will not report an error while executing other queries or writing against that db will report an error. +You can also put the db name in the DSN by changing `root:taosdata@http(localhost:6041)/` to `root:taosdata@http(localhost:6041)/test`. This method is supported by taosAdapter since TDengine 2.4.0.5. Executing the `create database` statement when the specified db does not exist will not report an error, while executing other queries or writing against that db will report an error. The complete example is as follows. @@ -289,7 +289,7 @@ func main() { 6. `readBufferSize` parameter has no significant effect after being increased - If you increase `readBufferSize` will reduce the number of `syscall` calls when fetching results. If the query result is smaller, modifying this parameter will not improve significantly. If you increase the parameter value too much, the bottleneck will be parsing JSON data. If you need to optimize the query speed, you must adjust the value according to the actual situation to achieve the best query result. + Increasing `readBufferSize` will reduce the number of `syscall` calls when fetching results. If the query result is small, modifying this parameter will not improve performance significantly. If you increase the parameter value too much, the bottleneck will be parsing JSON data. If you need to optimize the query speed, you must adjust the value based on the actual situation to achieve the best query performance. 7. `disableCompression` parameter is set to `false` when the query efficiency is reduced diff --git a/docs-en/14-reference/03-connector/java.mdx b/docs-en/14-reference/03-connector/java.mdx index 1be0360aa24d2b0ec2239c708d0597950003e6cb..33d715c2e218fd6db4f61882f2a7a92baa80f5a2 100644 --- a/docs-en/14-reference/03-connector/java.mdx +++ b/docs-en/14-reference/03-connector/java.mdx @@ -9,19 +9,19 @@ description: TDengine Java based on JDBC API and provide both native and REST co import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -'taos-jdbcdriver' is TDengine's official Java language connector, which allows Java developers to develop applications that access the TDengine database. 'taos-jdbcdriver' implements the interface of the JDBC driver standard and provides two forms of connectors. One is to connect to a TDengine instance natively through the TDengine client driver (taosc), which supports functions including data writing, querying, subscription, schemaless writing, and bind interface. And the other is to connect to a TDengine instance through the REST interface provided by taosAdapter (2.4.0.0 and later). REST connections implement has a slight differences to compare the set of features implemented and native connections. +'taos-jdbcdriver' is TDengine's official Java language connector, which allows Java developers to develop applications that access the TDengine database. 'taos-jdbcdriver' implements the interface of the JDBC driver standard and provides two forms of connectors. One is to connect to a TDengine instance natively through the TDengine client driver (taosc), which supports functions including data writing, querying, subscription, schemaless writing, and bind interface. And the other is to connect to a TDengine instance through the REST interface provided by taosAdapter (2.4.0.0 and later).
The implementations of the REST connection and the native connection differ slightly in the features they support. -![tdengine-connector](tdengine-jdbc-connector.png) +![TDengine Database tdengine-connector](tdengine-jdbc-connector.webp) The preceding diagram shows two ways for a Java app to access TDengine via connector: - JDBC native connection: Java applications use TSDBDriver on physical node 1 (pnode1) to call the client driver (`libtaos.so` or `taos.dll`) APIs directly to send writing and query requests to taosd instances located on physical node 2 (pnode2). - JDBC REST connection: The Java application encapsulates the SQL as a REST request via RestfulDriver, sends it to the REST server of physical node 2 (taosAdapter), requests TDengine server through the REST server, and returns the result. +- JDBC REST connection: The Java application encapsulates the SQL as a REST request via RestfulDriver, sends it to the REST server (taosAdapter) on physical node 2. taosAdapter forwards the request to TDengine server and returns the result. -Using REST connection, which does not rely on TDengine client drivers.It can be cross-platform more convenient and flexible but introduce about 30% lower performance than native connection. +The REST connection, which does not rely on TDengine client drivers, is more convenient and flexible, in addition to being cross-platform. However, the performance is about 30% lower than that of the native connection. :::info -TDengine's JDBC driver implementation is as consistent as possible with the relational database driver. Still, there are differences in the use scenarios and technical characteristics of TDengine and relational object databases, so 'taos-jdbcdriver' also has some differences from traditional JDBC drivers. You need to pay attention to the following points when using: +TDengine's JDBC driver implementation is as consistent as possible with the relational database driver. Still, there are differences in the use scenarios and technical characteristics of TDengine and relational object databases. So 'taos-jdbcdriver' also has some differences from traditional JDBC drivers. It is important to keep the following points in mind: - TDengine does not currently support delete operations for individual data records. - Transactional operations are not currently supported. @@ -88,7 +88,7 @@ Add following dependency in the `pom.xml` file of your Maven project: -You can build Java connector from source code after clone TDengine project: +You can build the Java connector from source code after cloning the TDengine project: ```shell git clone https://github.com/taosdata/TDengine.git @@ -96,7 +96,7 @@ cd TDengine/src/connector/jdbc mvn clean install -Dmaven.test.skip=true ``` -After compilation, a jar package of taos-jdbcdriver-2.0.XX-dist .jar is generated in the target directory, and the compiled jar file is automatically placed in the local Maven repository. +After compilation, a jar package named taos-jdbcdriver-2.0.XX-dist.jar is generated in the target directory, and the compiled jar file is automatically placed in the local Maven repository. @@ -186,7 +186,7 @@ Connection conn = DriverManager.getConnection(jdbcUrl); In the above example, a RestfulDriver with a JDBC REST connection is used to establish a connection to a database named `test` with hostname `taosdemo.com` on port `6041`. The URL specifies the user name as `root` and the password as `taosdata`. -There is no dependency on the client driver when Using a JDBC REST connection.
Compared to a JDBC native connection, only the following are required: 1. +There is no dependency on the client driver when using a JDBC REST connection. Compared to a JDBC native connection, only the following are required: 1. driverClass specified as "com.taosdata.jdbc.rs.RestfulDriver". 2. jdbcUrl starting with "jdbc:TAOS-RS://". @@ -206,10 +206,10 @@ The configuration parameters in the URL are as follows. - Unlike the native connection method, the REST interface is stateless. When using the JDBC REST connection, you need to specify the database name of the table and super table in SQL. For example. ```sql -INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('beijing') VALUES(now, 24.6); +INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFrancisco') VALUES(now, 24.6); ``` -- Starting from taos-jdbcdriver-2.0.36 and TDengine 2.2.0.0, if dbname is specified in the URL, JDBC REST connections will use `/rest/sql/dbname` as the URL for REST requests by default, and there is no need to specify dbname in SQL. For example, if the URL is `jdbc:TAOS-RS://127.0.0.1:6041/test`, then the SQL can be executed: insert into t1 using weather(ts, temperature) tags('beijing') values(now, 24.6); +- Starting from taos-jdbcdriver-2.0.36 and TDengine 2.2.0.0, if dbname is specified in the URL, JDBC REST connections will use `/rest/sql/dbname` as the URL for REST requests by default, and there is no need to specify dbname in SQL. For example, if the URL is `jdbc:TAOS-RS://127.0.0.1:6041/test`, then the SQL can be executed: insert into t1 using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6); ::: @@ -254,7 +254,7 @@ In the above example, a connection is established to `taosdemo.com`, port is 603 The configuration parameters in properties are as follows. - TSDBDriver.PROPERTY_KEY_USER: Login TDengine user name, default value 'root'. -- TSDBDriver.PROPERTY_KEY_PASSWORD: user login password, default value 'tasdata'. +- TSDBDriver.PROPERTY_KEY_PASSWORD: user login password, default value 'taosdata'. - TSDBDriver.PROPERTY_KEY_BATCH_LOAD: true: pull the result set in batch when executing query; false: pull the result set row by row. The default value is: false. - TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE: true: when executing executeBatch of Statement, if there is a SQL execution failure in the middle, continue to execute the following SQL. false: no longer execute any statement after the failed SQL. The default value is: false. - TSDBDriver.PROPERTY_KEY_CONFIG_DIR: Only works when using JDBC native connection. Client configuration file directory path, default value `/etc/taos` on Linux OS, default value `C:/TDengine/cfg` on Windows OS. @@ -271,7 +271,7 @@ If the configuration parameters are duplicated in the URL, Properties, or client 2. Properties connProps 3. the configuration file taos.cfg of the TDengine client driver when using a native connection -For example, if you specify the password as `taosdata` in the URL and specify the password as `taosdemo` in the Properties simultaneously. In this case, JDBC will use the password in the URL to establish the connection. +For example, if you specify the password as `taosdata` in the URL and specify the password as `taosdemo` in the Properties simultaneously, JDBC will use the password in the URL to establish the connection. ## Usage examples @@ -323,7 +323,7 @@ while(resultSet.next()){ } ``` -> The query is consistent with operating a relational database.
When using subscripts to get the contents of the returned fields, starting from 1, it is recommended to use the field names to get them. +> The query is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, you have to start from 1. However, we recommend using the field names to get the values of the fields in the result set. ### Handling exceptions @@ -565,7 +565,7 @@ public class ParameterBindingDemo { // set table name pstmt.setTableName("t5_" + i); // set tags - pstmt.setTagNString(0, "北京-abc"); + pstmt.setTagNString(0, "California-abc"); // set columns ArrayList tsList = new ArrayList<>(); @@ -576,7 +576,7 @@ public class ParameterBindingDemo { ArrayList f1List = new ArrayList<>(); for (int j = 0; j < numOfRow; j++) { - f1List.add("北京-abc"); + f1List.add("California-abc"); } pstmt.setNString(1, f1List, BINARY_COLUMN_SIZE); @@ -623,7 +623,7 @@ public void setNString(int columnIndex, ArrayList list, int size) throws ### Schemaless Writing -Starting with version 2.2.0.0, TDengine has added the ability to schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. See [schemaless writing](/reference/schemaless/) for details. +Starting with version 2.2.0.0, TDengine has added the ability to perform schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. See [schemaless writing](/reference/schemaless/) for details. **Note**. @@ -635,7 +635,7 @@ public class SchemalessInsertTest { private static final String host = "127.0.0.1"; private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000"; private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0"; - private static final String jsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1346846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"Beijing\", \"id\": \"d1001\"}}"; + private static final String jsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1346846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}"; public static void main(String[] args) throws SQLException { final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; @@ -666,16 +666,16 @@ The TDengine Java Connector supports subscription functionality with the followi #### Create subscriptions ```java -TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false); +TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topicname", "select * from meters", false); ``` The three parameters of the `subscribe()` method have the following meanings. -- topic: the subscribed topic (i.e., name). This parameter is the unique identifier of the subscription -- sql: the query statement of the subscription, this statement can only be `select` statement, only the original data should be queried, and you can query only the data in the positive time order +- topicname: the name of the subscribed topic. This parameter is the unique identifier of the subscription. +- sql: the query statement of the subscription. This statement can only be a `select` statement. Only original data can be queried, and you can query the data only in temporal order.
- restart: if the subscription already exists, whether to restart or continue the previous subscription -The above example will use the SQL command `select * from meters` to create a subscription named `topic`. If the subscription exists, it will continue the progress of the previous query instead of consuming all the data from the beginning. +The above example will use the SQL command `select * from meters` to create a subscription named `topicname`. If the subscription exists, it will continue the progress of the previous query instead of consuming all the data from the beginning. #### Subscribe to consume data @@ -742,7 +742,7 @@ Example usage is as follows. //query or insert // ... - connection.close(); // put back to conneciton pool + connection.close(); // put back to connection pool } ``` @@ -774,7 +774,7 @@ public static void main(String[] args) throws Exception { //query or insert // ... - connection.close(); // put back to conneciton pool + connection.close(); // put back to connection pool } ``` diff --git a/docs-en/14-reference/03-connector/node.mdx b/docs-en/14-reference/03-connector/node.mdx index 34cb08fcf8e090da0de021c102acf45e26ecb08c..8f586acde4848af71efcb23358be1f8486cedb8e 100644 --- a/docs-en/14-reference/03-connector/node.mdx +++ b/docs-en/14-reference/03-connector/node.mdx @@ -8,13 +8,12 @@ title: TDengine Node.js Connector import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; -import Preparition from "./_preparition.mdx"; -import NodeInsert from "../../04-develop/03-insert-data/_js_sql.mdx"; -import NodeInfluxLine from "../../04-develop/03-insert-data/_js_line.mdx"; -import NodeOpenTSDBTelnet from "../../04-develop/03-insert-data/_js_opts_telnet.mdx"; -import NodeOpenTSDBJson from "../../04-develop/03-insert-data/_js_opts_json.mdx"; -import NodeQuery from "../../04-develop/04-query-data/_js.mdx"; -import NodeAsyncQuery from "../../04-develop/04-query-data/_js_async.mdx"; +import Preparation from "./_preparation.mdx"; +import NodeInsert from "../../07-develop/03-insert-data/_js_sql.mdx"; +import NodeInfluxLine from "../../07-develop/03-insert-data/_js_line.mdx"; +import NodeOpenTSDBTelnet from "../../07-develop/03-insert-data/_js_opts_telnet.mdx"; +import NodeOpenTSDBJson from "../../07-develop/03-insert-data/_js_opts_json.mdx"; +import NodeQuery from "../../07-develop/04-query-data/_js.mdx"; `td2.0-connector` and `td2.0-rest-connector` are the official Node.js language connectors for TDengine. Node.js developers can develop applications to access TDengine instance data. @@ -78,7 +77,7 @@ Manually install the following tools. - Install [Python](https://www.python.org/downloads/) 2.7 (`v3.x.x` is not supported) and execute `npm config set python python2.7`. - Go to the `cmd` command-line interface and execute `npm config set msvs_version 2017` -Refer to Microsoft's Node.js User Manual [Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows- environment. md#compiling-native-addon-modules). +Refer to Microsoft's [Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules). If using ARM64 Node.js on Windows 10 ARM, you must add "Visual C++ compilers and libraries for ARM64" and "Visual C++ ATL for ARM64".
@@ -189,14 +188,8 @@ let cursor = conn.cursor(); ### Query data -#### Synchronous queries - -#### asynchronous query - - - ## More Sample Programs | Sample Programs | Sample Program Description | @@ -232,7 +225,7 @@ See [video tutorial](https://www.taosdata.com/blog/2020/11/11/1957.html) for the 2. "Unable to establish connection", "Unable to resolve FQDN" - Usually, root cause is the FQDN is not configured correctly. You can refer to [How to understand TDengine's FQDN (In Chinese)](https://www.taosdata.com/blog/2021/07/29/2741.html). + Usually, the root cause is an incorrect FQDN configuration. You can refer to this section in the [FAQ](https://docs.tdengine.com/2.4/train-faq/faq/#2-how-to-handle-unable-to-establish-connection) to troubleshoot. ## Important Updates diff --git a/docs-en/14-reference/03-connector/python.mdx b/docs-en/14-reference/03-connector/python.mdx index 99a84f657b1a71e904c251c38e5e623c896bdf95..69eec2388d460754493d2b775f14ab4bbf129799 100644 --- a/docs-en/14-reference/03-connector/python.mdx +++ b/docs-en/14-reference/03-connector/python.mdx @@ -11,22 +11,22 @@ import TabItem from "@theme/TabItem"; `taospy` is the official Python connector for TDengine. `taospy` provides a rich set of APIs that makes it easy for Python applications to access TDengine. `taospy` wraps both the [native interface](/reference/connector/cpp) and [REST interface](/reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively. In addition to wrapping the native and REST interfaces, `taospy` also provides a set of programming interfaces that conforms to the [Python Data Access Specification (PEP 249)](https://peps.python.org/pep-0249/). It is easy to integrate `taospy` with many third-party tools, such as [SQLAlchemy](https://www.sqlalchemy.org/) and [pandas](https://pandas.pydata.org/). -The connection to the server directly using the native interface provided by the client driver is referred to hereinafter as a "native connection"; the connection to the server using the REST interface provided by taosAdapter is referred to hereinafter as a "REST connection". +The direct connection to the server using the native interface provided by the client driver is referred to hereinafter as a "native connection"; the connection to the server using the REST interface provided by taosAdapter is referred to hereinafter as a "REST connection". The source code for the Python connector is hosted on [GitHub](https://github.com/taosdata/taos-connector-python). ## Supported Platforms -- The native connection [supported platforms](/reference/connector/#supported-platforms) is the same as the one supported by the TDengine client. +- The [supported platforms](/reference/connector/#supported-platforms) for the native connection are the same as the ones supported by the TDengine client. - REST connections are supported on all platforms that can run Python. ## Version selection -We recommend using the latest version of `taospy`, regardless what the version of TDengine is. +We recommend using the latest version of `taospy`, regardless of the version of TDengine. ## Supported features -- Native connections support all the core features of TDeingine, including connection management, SQL execution, bind interface, subscriptions, and schemaless writing. +- Native connections support all the core features of TDengine, including connection management, SQL execution, bind interface, subscriptions, and schemaless writing. 
- REST connections support features such as connection management and SQL execution. (SQL execution allows you to manage databases, tables, and supertables, write data, query data, create continuous queries, etc.). ## Installation @@ -34,7 +34,7 @@ We recommend using the latest version of `taospy`, regardless what the version o ### Preparation 1. Install Python. Python >= 3.6 is recommended. If Python is not available on your system, refer to the [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) to install it. -2. Install [pip](https://pypi.org/project/pip/). In most cases, the Python installer comes with the pip utility. If not, please refer to [pip docuemntation](https://pip.pypa.io/en/stable/installation/) to install it. +2. Install [pip](https://pypi.org/project/pip/). In most cases, the Python installer comes with the pip utility. If not, please refer to [pip documentation](https://pip.pypa.io/en/stable/installation/) to install it. If you use a native connection, you will also need to [Install Client Driver](/reference/connector#Install-Client-Driver). The client install package includes the TDengine client dynamic link library (`libtaos.so` or `taos.dll`) and the TDengine CLI. @@ -53,7 +53,7 @@ Earlier TDengine client software includes the Python connector. If the Python co ::: -#### to install `taospy` +#### To install `taospy` @@ -139,7 +139,7 @@ The FQDN above can be the FQDN of any dnode in the cluster, and the PORT is the -For REST connections and making sure the cluster is up, make sure the taosAdapter component is up. This can be tested using the following `curl ` command. +For REST connections, make sure the cluster and the taosAdapter component are running. This can be tested using the following `curl` command. ``` curl -u root:taosdata http://:/rest/sql -d "select server_version()" @@ -200,8 +200,8 @@ The `connect()` function returns a `taos.TaosConnection` instance. In client-sid All arguments to the `connect()` function are optional keyword arguments. The following connection parameters can be specified. - `host`: The host to connect to. The default is localhost. -- `user`: TDenigne user name. The default is `root`. -- `password`: TDeingine user password. The default is `taosdata`. +- `user`: TDengine user name. The default is `root`. +- `password`: TDengine user password. The default is `taosdata`. - `port`: The port on which the taosAdapter REST service listens. Default is 6041. - `timeout`: HTTP request timeout in seconds. The default is `socket._GLOBAL_DEFAULT_TIMEOUT`. Usually, no configuration is needed.
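+The following is a minimal sketch of establishing a REST connection with these parameters and running a query through the PEP 249 style interface. It is an illustration rather than an official example: it assumes the `taosrest` module accepts exactly the keyword arguments documented above and that the returned connection provides a `cursor()` method as per PEP 249.
+
+```python
+# Minimal sketch, assuming taosrest.connect() accepts the documented
+# keyword arguments (host, user, password, port, timeout) and returns
+# a PEP 249 style connection object.
+import taosrest
+
+conn = taosrest.connect(host="localhost", user="root",
+                        password="taosdata", port=6041, timeout=30)
+cursor = conn.cursor()
+cursor.execute("SELECT server_version()")  # simple connectivity check
+print(cursor.fetchall())
+conn.close()
+```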
@@ -312,7 +312,7 @@ For a more detailed description of the `sql()` method, please refer to [RestClie ### Exception handling -All database operations will be thrown directly if an exception occurs. The application is responsible for exception handling. For example: +All errors from database operations are thrown directly as exceptions and the error message from the database is passed up the exception stack. The application is responsible for exception handling. For example: ```python {{#include docs-examples/python/handle_exception.py}} ``` @@ -320,7 +320,7 @@ All database operations will be thrown directly if an exception occurs. The appl ### About nanoseconds -Due to the current imperfection of Python's nanosecond support (see link below), the current implementation returns integers at nanosecond precision instead of the `datetime` type produced by `ms and `us`, which application developers will need to handle on their own. And it is recommended to use pandas' to_datetime(). The Python Connector may modify the interface in the future if Python officially supports nanoseconds in full. +Due to the current imperfection of Python's nanosecond support (see links below), the current implementation returns integers at nanosecond precision instead of the `datetime` type produced by `ms` and `us`, which application developers will need to handle on their own. We recommend using pandas' to_datetime() for this. The Python Connector may modify the interface in the future if Python officially supports nanoseconds in full. 1. https://stackoverflow.com/questions/10611328/parsing-datetime-strings-containing-nanoseconds 2. https://www.python.org/dev/peps/pep-0564/ @@ -328,7 +328,7 @@ Due to the current imperfection of Python's nanosecond support (see link below), ## Frequently Asked Questions -Welcome to [ask questions or report questions] (https://github.com/taosdata/taos-connector-python/issues). +Welcome to [ask questions or report issues](https://github.com/taosdata/taos-connector-python/issues). ## Important Update diff --git a/docs-en/14-reference/03-connector/rust.mdx b/docs-en/14-reference/03-connector/rust.mdx index 6989e80638cec471c6e10e21529928e2133632f2..cd54f35982ec13fc3c9160145fa002fb6f1d094b 100644 --- a/docs-en/14-reference/03-connector/rust.mdx +++ b/docs-en/14-reference/03-connector/rust.mdx @@ -8,12 +8,12 @@ title: TDengine Rust Connector import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -import Preparition from "./_preparition.mdx" -import RustInsert from "../../04-develop/03-insert-data/_rust_sql.mdx" -import RustInfluxLine from "../../04-develop/03-insert-data/_rust_line.mdx" -import RustOpenTSDBTelnet from "../../04-develop/03-insert-data/_rust_opts_telnet.mdx" -import RustOpenTSDBJson from "../../04-develop/03-insert-data/_rust_opts_json.mdx" -import RustQuery from "../../04-develop/04-query-data/_rust.mdx" +import Preparation from "./_preparation.mdx" +import RustInsert from "../../07-develop/03-insert-data/_rust_sql.mdx" +import RustInfluxLine from "../../07-develop/03-insert-data/_rust_line.mdx" +import RustOpenTSDBTelnet from "../../07-develop/03-insert-data/_rust_opts_telnet.mdx" +import RustOpenTSDBJson from "../../07-develop/03-insert-data/_rust_opts_json.mdx" +import RustQuery from "../../07-develop/04-query-data/_rust.mdx" `libtaos` is the official Rust language connector for TDengine. Rust developers can develop applications to access the TDengine instance data. @@ -30,7 +30,7 @@ REST connections are supported on all platforms that can run Rust. Please refer to [version support list](/reference/connector#version-support). -The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. Recommend to use TDengine version 2.4 or higher to avoid known issues. +The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. We recommend using TDengine version 2.4 or higher to avoid known issues. ## Installation @@ -206,7 +206,7 @@ let conn: Taos = cfg.connect(); ### Connection pooling -In complex applications, recommand to enable connection pool. Connection pool for [libtaos] is implemented using [r2d2]. +In complex applications, we recommend enabling connection pools. The connection pool for [libtaos] is implemented using [r2d2]. As follows, a connection pool with default parameters can be generated.
@@ -269,7 +269,7 @@ The [Taos] structure is the connection manager in [libtaos] and provides two mai Note that Rust asynchronous functions and an asynchronous runtime are required. -[Taos] provides partial Rust methodization of SQL to reduce the frequency of `format!` code blocks. +[Taos] provides a few Rust methods that encapsulate SQL to reduce the frequency of `format!` code blocks. - `.describe(table: &str)`: Executes `DESCRIBE` and returns a Rust data structure. - `.create_database(database: &str)`: Executes the `CREATE DATABASE` statement. @@ -279,7 +279,7 @@ In addition, this structure is also the entry point for [Parameter Binding](#Par ### Bind Interface -Similar to the C interface, Rust provides the bind interface's wraping. First, create a bind object [Stmt] for a SQL command from the [Taos] object. +Similar to the C interface, Rust provides a wrapper for the bind interface. First, create a bind object [Stmt] for a SQL command from the [Taos] object. ```rust let mut stmt: Stmt = taos.stmt("insert into ? values(?, ?)")?; diff --git a/docs-en/14-reference/03-connector/tdengine-jdbc-connector.png b/docs-en/14-reference/03-connector/tdengine-jdbc-connector.png deleted file mode 100644 index 1cb8401ea30b01d8db652ed4ea70ecc511de7461..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/03-connector/tdengine-jdbc-connector.png and /dev/null differ diff --git a/docs-en/14-reference/03-connector/tdengine-jdbc-connector.webp b/docs-en/14-reference/03-connector/tdengine-jdbc-connector.webp new file mode 100644 index 0000000000000000000000000000000000000000..37cf6d90a528e320d5cb7d6da502d3a5b10aa4ee Binary files /dev/null and b/docs-en/14-reference/03-connector/tdengine-jdbc-connector.webp differ diff --git a/docs-en/14-reference/04-taosadapter.md b/docs-en/14-reference/04-taosadapter.md index 85fd2923b02189d6f3cfd73efff784d12c3bb69a..3264124655e7040e1d94b43500a0b582d95cb5a1 100644 --- a/docs-en/14-reference/04-taosadapter.md +++ b/docs-en/14-reference/04-taosadapter.md @@ -24,21 +24,21 @@ taosAdapter provides the following features. ## taosAdapter architecture diagram -![taosAdapter Architecture](taosAdapter-architecture.png) +![TDengine Database taosAdapter Architecture](taosAdapter-architecture.webp) ## taosAdapter Deployment Method ### Install taosAdapter -taosAdapter has been part of TDengine server software since TDengine v2.4.0.0. If you use the TDengine server, you don't need additional steps to install taosAdapter. You can download taosAdapter from [TAOSData official website](https://taosdata.com/en/all-downloads/) to download the TDengine server installation package (taosAdapter is included in v2.4.0.0 and later version). If you need to deploy taosAdapter separately on another server other than the TDengine server, you should install the full TDengine on that server to install taosAdapter. If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter]( https://github.com/taosdata/taosadapter/blob/develop/BUILD.md) documentation. +taosAdapter has been part of TDengine server software since TDengine v2.4.0.0. If you use the TDengine server, you don't need additional steps to install taosAdapter. You can download the TDengine server installation package from the [TDengine official website](https://tdengine.com/all-downloads/) (taosAdapter is included in v2.4.0.0 and later versions).
If you need to deploy taosAdapter separately on a server other than the TDengine server, you should install the full TDengine server package on that server to install taosAdapter. If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter]( https://github.com/taosdata/taosadapter/blob/develop/BUILD.md) documentation. -### start/stop taosAdapter +### Start/Stop taosAdapter On Linux systems, the taosAdapter service is managed by `systemd` by default. You can use the command `systemctl start taosadapter` to start the taosAdapter service and use the command `systemctl stop taosadapter` to stop the taosAdapter service. ### Remove taosAdapter -Use the command `rmtaos` to remove the TDengine server software if you use tar.gz package or use package management command like rpm or apt to remove the TDengine server, including taosAdapter. +Use the command `rmtaos` to remove the TDengine server software if you installed it using the tar.gz package. If you installed it using a .deb or .rpm package, use the corresponding package management command, such as apt or rpm, to remove the TDengine server, including taosAdapter. ### Upgrade taosAdapter @@ -153,8 +153,7 @@ See [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/bl ## Feature List -- Compatible with RESTful interfaces - [https://www.taosdata.com/cn/documentation/connector#restful](https://www.taosdata.com/cn/documentation/connector#restful) +- Compatible with RESTful interfaces [REST API](/reference/rest-api/) - Compatible with InfluxDB v1 write interface [https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/](https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/) - Compatible with OpenTSDB JSON and telnet format writes @@ -187,7 +186,7 @@ You can use any client that supports the http protocol to write data to or query ### InfluxDB -You can use any client that supports the http protocol to access the Restful interface address `http://:6041/` to write data in InfluxDB compatible format to TDengine. The EndPoint is as follows: +You can use any client that supports the http protocol to access the RESTful interface address `http://:6041/` to write data in InfluxDB compatible format to TDengine. The EndPoint is as follows: ```text /influxdb/v1/write ``` @@ -204,7 +203,7 @@ Note: InfluxDB token authorization is not supported at present. Only Basic autho ### OpenTSDB -You can use any client that supports the http protocol to access the Restful interface address `http://:6041/` to write data in OpenTSDB compatible format to TDengine. +You can use any client that supports the http protocol to access the RESTful interface address `http://:6041/` to write data in OpenTSDB compatible format to TDengine. ```text /opentsdb/v1/put/json/:db ```
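+As a rough illustration of these two write paths, the following hypothetical Python sketch posts one record through each endpoint using the `requests` library. The hostname, database name, and payloads are invented for the example, only Basic authorization is used (as noted above), and the `db` query parameter for the InfluxDB endpoint is assumed from the InfluxDB v1 API.
+
+```python
+# Hypothetical sketch: writing to taosAdapter's InfluxDB- and
+# OpenTSDB-compatible endpoints. Host, db name, and payloads are
+# illustrative assumptions.
+import requests
+
+auth = ("root", "taosdata")  # Basic authorization only
+base = "http://localhost:6041"
+
+# InfluxDB v1 line protocol write; db passed as a query parameter
+line = "meters,location=California.SanFrancisco current=10.3 1626006833639000000"
+requests.post(f"{base}/influxdb/v1/write", params={"db": "test"},
+              data=line, auth=auth).raise_for_status()
+
+# OpenTSDB JSON format write; db is part of the URL path
+point = {"metric": "meters.current", "timestamp": 1626006833,
+         "value": 10.3, "tags": {"location": "California.SanFrancisco"}}
+requests.post(f"{base}/opentsdb/v1/put/json/test", json=point,
+              auth=auth).raise_for_status()
+```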
@@ -241,7 +240,7 @@ node_export is an exporter of hardware and OS metrics exposed by the \*NIX kerne ## Memory usage optimization methods -taosAdapter will monitor its memory usage during operation and adjust it with two thresholds. Valid values range from -1 to 100 integers in percent of the system's physical memory. +taosAdapter will monitor its memory usage during operation and adjust it with two thresholds. Valid values are integers from 1 to 100, representing a percentage of the system's physical memory. - pauseQueryMemoryThreshold - pauseAllMemoryThreshold @@ -277,7 +276,7 @@ Corresponding configuration parameter monitor.pauseQueryMemoryThreshold memory threshold for no more queries Environment variable `TAOS_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD` (default 70) ``` -You can adjust it according to the specific application scenario and operation strategy, and it is recommended to use operation monitoring software to monitor system memory status timely. The load balancer can also check the taosAdapter running status through this interface. +You should adjust this parameter based on your specific application scenario and operation strategy. We recommend using monitoring software to monitor system memory status. The load balancer can also check the taosAdapter running status through this interface. ## taosAdapter Monitoring Metrics @@ -326,7 +325,7 @@ You can also adjust the level of the taosAdapter log output by setting the `--lo ## How to migrate from older TDengine versions to taosAdapter -In TDengine server 2.2.x.x or earlier, the TDengine server process (taosd) contains an embedded HTTP service. As mentioned earlier, taosAdapter is a standalone software managed using `systemd` and has its process ID. And there are some configuration parameters and behaviors that are different between the two. See the following table for details. +In TDengine server 2.2.x.x or earlier, the TDengine server process (taosd) contains an embedded HTTP service. As mentioned earlier, taosAdapter is a standalone software managed using `systemd` and has its own process ID. There are some configuration parameters and behaviors that are different between the two. See the following table for details. | **#** | **embedded httpd** | **taosAdapter** | **comment** | | ----- | ------------------- | ------------------------------------ | ------------------------------------------------------------------ ------------------------------------------------------------------------ | diff --git a/docs-en/14-reference/05-taosbenchmark.md b/docs-en/14-reference/05-taosbenchmark.md index 54b4b406fc8c6895a6dad70643e8452c2be9ef37..93d6041a124e453bfc212447c566ea79b323e881 100644 --- a/docs-en/14-reference/05-taosbenchmark.md +++ b/docs-en/14-reference/05-taosbenchmark.md @@ -95,7 +95,7 @@ taosBenchmark -f ## Command-line arguments in detail - **-f/--file ** : - specify the configuration file to use. This file includes All parameters. And users should not use this parameter with other parameters on the command-line. There is no default value. + specify the configuration file to use. This file includes all parameters. Users should not use this parameter with other parameters on the command-line. There is no default value. - **-c/--config-dir ** : specify the directory of the TDengine cluster configuration file. The default path is `/etc/taos`. @@ -276,9 +276,9 @@ The parameters for creating super tables are configured in `super_tables` in the - **auto_create_table**: only when insert_mode is taosc, rest, stmt, and childtable_exists is "no". "yes" means taosBenchmark will automatically create non-existent tables when inserting data; "no" means that taosBenchmark will create all tables before inserting. -- **batch_create_tbl_num** : the number of tables per batch when creating sub-tables, default is 10. Note: the actual number of batches may not be the same as this value when the executed SQL statement is larger than the maximum length supported, it will be automatically truncated and re-executed to continue creating.
+- **batch_create_tbl_num** : the number of tables per batch when creating sub-tables, default is 10. Note: the actual number of batches may not be the same as this value. If the executed SQL statement is larger than the maximum length supported, it will be automatically truncated and re-executed to continue creating. -- **data_source**: specify the source of data-generating. Default is taosBenchmark randomly generated. Users can configure it as "rand" and "sample". When "sample" is used, taosBenchmark will use the data in the file specified by the `sample_file` parameter. +- **data_source**: specify the source of data generation. The default is "rand", i.e. data randomly generated by taosBenchmark. Users can configure it as "rand" or "sample". When "sample" is used, taosBenchmark will use the data in the file specified by the `sample_file` parameter. - **insert_mode**: insertion mode with options taosc, rest, stmt, sml, sml-rest, corresponding to normal write, restful interface write, parameter binding interface write, schemaless interface write, restful schemaless interface write (provided by taosAdapter). The default value is taosc. @@ -300,15 +300,15 @@ The parameters for creating super tables are configured in `super_tables` in the - **partial_col_num**: If this value is a positive number n, only the first n columns are written to (only if insert_mode is taosc or rest), or all columns if n is 0. -- **disorder_ratio** : Specifies the percentage probability of disordered data in the value range [0,50]. The default is 0, which means there is no disorder data. +- **disorder_ratio** : Specifies the percentage probability of disordered (i.e. out-of-order) data in the value range [0,50]. The default is 0, which means there is no disorder data. -- **disorder_range** : Specifies the timestamp fallback range for the disordered data. The generated disorder timestamp is the timestamp that should be used in the non-disorder case minus a random value in this range. Valid only if the percentage of disordered data specified by `-O/--disorder` is greater than 0. +- **disorder_range** : Specifies the timestamp fallback range for the disordered data. The disordered timestamp is generated by subtracting a random value in this range from the timestamp that would be used in the non-disorder case. Valid only if the percentage of disordered data specified by `-O/--disorder` is greater than 0. -- **timestamp_step**: The timestamp step for inserting data in each child table, in units consistent with the `precision` of the database, the default value is 1. +- **timestamp_step**: The timestamp step for inserting data in each child table, in units consistent with the `precision` of the database. For example, if the `precision` is milliseconds, the timestamp step will be in milliseconds. The default value is 1. - **start_timestamp** : The timestamp start value of each sub-table, the default value is now. -- **sample_format**: The type of the sample data file, now only "csv" is supported. +- **sample_format**: The type of the sample data file; for now only "csv" is supported. - **sample_file**: Specify a CSV format file as the data source. It only works when data_source is a sample. If the number of rows in the CSV file is less than or equal to prepared_rand, then taosBenchmark will read the CSV file data cyclically until it is the same as prepared_rand; otherwise, taosBenchmark will read only prepared_rand rows. The final number of rows of data generated is the smaller of the two.
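+To make the relationship between these fields concrete, the hypothetical Python sketch below generates a `super_tables` fragment that uses several of the parameters described above and then invokes `taosBenchmark -f`. A real configuration file requires additional sections (databases, column definitions, and so on) that are omitted here, so treat this purely as an illustration of the fields, not as a complete configuration.
+
+```python
+# Hypothetical sketch: emitting a fragment of a taosBenchmark JSON
+# configuration. Only fields documented above are included; a real
+# config file needs more required sections.
+import json
+import subprocess
+
+super_table = {
+    "batch_create_tbl_num": 10,   # sub-tables created per batch
+    "data_source": "sample",      # take rows from sample_file
+    "insert_mode": "taosc",       # normal write via the client driver
+    "disorder_ratio": 10,         # 10% of rows out of order
+    "disorder_range": 1000,       # fallback range for disordered timestamps
+    "timestamp_step": 1,          # step in the database's precision units
+    "start_timestamp": "now",
+    "sample_format": "csv",
+    "sample_file": "./sample.csv",
+}
+
+with open("insert-config.json", "w") as f:
+    json.dump({"super_tables": [super_table]}, f, indent=2)
+
+# -f selects the configuration file, as documented above
+subprocess.run(["taosBenchmark", "-f", "insert-config.json"], check=True)
+```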
@@ -341,7 +341,7 @@ The configuration parameters for specifying super table tag columns and data col - **create_table_thread_count** : The number of threads to build the table, default is 8. -- **connection_pool_size** : The number of pre-established connections to the TDengine server. If not configured, it is the same number of threads specified. +- **connection_pool_size** : The number of pre-established connections to the TDengine server. If not configured, it is the same as the number of threads specified. - **result_file** : The path to the result output file, the default value is ./output.txt. diff --git a/docs-en/14-reference/06-taosdump.md b/docs-en/14-reference/06-taosdump.md index 973999704b595ea9b742f1ef759f973aa1f05649..5403e40925f633ce62795cc6037fc8c8f7aad07a 100644 --- a/docs-en/14-reference/06-taosdump.md +++ b/docs-en/14-reference/06-taosdump.md @@ -1,25 +1,25 @@ --- title: taosdump -description: "taosdump is a tool application that supports backing up data from a running TDengine cluster and restoring the backed up data to the same or another running TDengine cluster." +description: "taosdump is a tool that supports backing up data from a running TDengine cluster and restoring the backed up data to the same or another running TDengine cluster." --- ## Introduction -taosdump is a tool application that supports backing up data from a running TDengine cluster and restoring the backed up data to the same or another running TDengine cluster. +taosdump is a tool that supports backing up data from a running TDengine cluster and restoring the backed up data to the same or another running TDengine cluster. taosdump can back up a database, a super table, or a normal table as a logical data unit or backup data records in the database, super tables, and normal tables. When using taosdump, you can specify the directory path for data backup. If you do not specify a directory, taosdump will back up the data to the current directory by default. -Suppose the specified location already has data files. In that case, taosdump will prompt the user and exit immediately to avoid data overwriting which means that the same path can only be used for one backup. -Please be careful if you see a prompt for this. +If the specified location already has data files, taosdump will prompt the user and exit immediately to avoid data overwriting. This means that the same path can only be used for one backup. + +If you see such a prompt, proceed carefully and ensure that you follow best practices and relevant SOPs for data integrity, backup, and data security. -taosdump is a logical backup tool and should not be used to back up any raw data, environment settings, Users should not use taosdump to back up raw data, environment settings, hardware information, server configuration, or cluster topology. taosdump uses [Apache AVRO](https://avro.apache.org/) as the data file format to store backup data. ## Installation There are two ways to install taosdump: - Install the taosTools official installer. Please find taosTools from [All download links](https://www.taosdata.com/all-downloads) page and download and install it. +- Install the taosTools official installer. Please find taosTools on the [All download links](https://www.tdengine.com/all-downloads) page, then download and install it. - Compile taos-tools separately and install it. Please refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository for details.
@@ -28,14 +28,14 @@ There are two ways to install taosdump: ### taosdump backup data 1. back up all databases: specify the `-A` or `-all-databases` parameter. -2. backup multiple specified databases: use `-D db1,db2,... ` parameters; 3. +2. back up multiple specified databases: use the `-D db1,db2,...` parameter. 3. back up some super or normal tables in the specified database: use the `-dbname stbname1 stbname2 tbname1 tbname2 ... ` parameters. Note that the first parameter of this input sequence is the database name, and only one database is supported. The second and subsequent parameters are the names of super or normal tables in that database, separated by spaces. 4. back up the system log database: TDengine clusters usually contain a system database named `log`. The data in this database is the data that TDengine runs itself, and taosdump will not back up the log database by default. If users need to back up the log database, they can use the `-a` or `-allow-sys` command-line parameter. -5. Loose mode backup: taosdump version 1.4.1 onwards provides `-n` and `-L` parameters for backing up data without using escape characters and "loose" mode, which can reduce the number of backups if table names, column names, tag names do not use This can reduce the backup data time and backup data footprint if table names, column names, and tag names do not use `escape character`. If you are unsure about using `-n` and `-L` conditions, please use the default parameters for "strict" mode backup. See the [official documentation](/taos-sql/escape) for a description of escaped characters. +5. Loose mode backup: taosdump version 1.4.1 onwards provides the `-n` and `-L` parameters for backing up data without using escape characters ("loose" mode), which can be used if table names, column names, and tag names do not use escape characters. This can reduce backup time and the backup data footprint. If you are unsure about using the `-n` and `-L` conditions, please use the default parameters for "strict" mode backup. See the [official documentation](/taos-sql/escape) for a description of escaped characters. :::tip - taosdump versions after 1.4.1 provide the `-I` argument for parsing Avro file schema and data. If users specify `-s`, taosdump will only parse the schema. - Backups after taosdump 1.4.2 use the batch count specified by the `-B` parameter. The default value is 16384. If, in some environments, low network speed or disk performance causes "Error actual dump ... batch ..." can be tried by challenging the `-B` parameter to a smaller value. +- Backups after taosdump 1.4.2 use the batch count specified by the `-B` parameter. The default value is 16384. If, in some environments, low network speed or disk performance causes "Error actual dump ... batch ...", then try changing the `-B` parameter to a smaller value. ::: @@ -44,7 +44,7 @@ There are two ways to install taosdump: Restore the data file in the specified path: use the `-i` parameter plus the path to the data file. You should not use the same directory to backup different data sets, and you should not backup the same data set multiple times in the same path. Otherwise, the backup data will be overwritten or backed up multiple times. :::tip -taosdump internally uses TDengine stmt binding API for writing recovery data and currently uses 16384 as one write batch for better data recovery performance. If there are more columns in the backup data, it may cause a "WAL size exceeds limit" error. You can try to adjust to a smaller value by using the `-B` parameter. +taosdump internally uses the TDengine stmt binding API for writing recovery data with a default batch size of 16384 for better data recovery performance. If there are more columns in the backup data, it may cause a "WAL size exceeds limit" error. You can try to adjust the batch size to a smaller value by using the `-B` parameter. :::
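+As a hypothetical illustration of a backup-and-restore round trip with the parameters described above, the following Python sketch shells out to taosdump; the database names and paths are invented for the example.
+
+```python
+# Hypothetical sketch: driving taosdump from a script. The flags used
+# (-D, -o, -i, -B) are the ones documented on this page; database
+# names and paths are illustrative.
+import subprocess
+
+# Back up two databases into a dedicated, previously unused directory.
+subprocess.run(["taosdump", "-D", "db1,db2", "-o", "/backup/td"],
+               check=True)
+
+# Restore from that directory, lowering the write batch size to avoid
+# a "WAL size exceeds limit" error on wide tables.
+subprocess.run(["taosdump", "-i", "/backup/td", "-B", "4096"],
+               check=True)
+```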
@@ -59,7 +59,7 @@ Usage: taosdump [OPTION...] dbname [tbname ...] or: taosdump [OPTION...] -i inpath or: taosdump [OPTION...] -o outpath - -h, --host=HOST Server host dumping data from. Default is + -h, --host=HOST Server host from which to dump data. Default is localhost. -p, --password User password to connect to server. Default is taosdata. @@ -72,10 +72,10 @@ Usage: taosdump [OPTION...] dbname [tbname ...] -r, --resultFile=RESULTFILE DumpOut/In Result file path and name. -a, --allow-sys Allow to dump system database -A, --all-databases Dump all databases. - -D, --databases=DATABASES Dump inputted databases. Use comma to separate - databases' name. + -D, --databases=DATABASES Dump listed databases. Use comma to separate + database names. -N, --without-property Dump database without its properties. - -s, --schemaonly Only dump tables' schema. + -s, --schemaonly Only dump table schemas. -y, --answer-yes Input yes for prompt. It will skip data file checking! -d, --avro-codec=snappy Choose an avro codec among null, deflate, snappy, @@ -98,7 +98,7 @@ Usage: taosdump [OPTION...] dbname [tbname ...] and try. The workable value is related to the length of the row and type of table schema. -I, --inspect inspect avro file content and print on screen - -L, --loose-mode Using loose mode if the table name and column name + -L, --loose-mode Use loose mode if the table name and column name use letters and numbers only. Default is NOT. -n, --no-escape No escape char '`'. Default is using it. -T, --thread-num=THREAD_NUM Number of thread for dump in file.
Default is diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.png b/docs-en/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.png deleted file mode 100644 index 4708f836feb21980f2db7fed4a55f799b23a6ec1..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp b/docs-en/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp new file mode 100644 index 0000000000000000000000000000000000000000..a78e18028a94c2f6a783b08d992a25c791527407 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.png b/docs-en/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.png deleted file mode 100644 index f2684e6eed70e8f56697eae42b495d6bd62815e8..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp b/docs-en/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp new file mode 100644 index 0000000000000000000000000000000000000000..b152418d0902b8ebdf62ebce6705c10dd5ab4fbf Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.png b/docs-en/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.png deleted file mode 100644 index 74686691e4106b8646c3deee1e0ce73b2f53f1ea..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp b/docs-en/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp new file mode 100644 index 0000000000000000000000000000000000000000..f58f48b7f17375cb8e62e7c0126ca3aea56a13f6 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-4-requests.png b/docs-en/14-reference/07-tdinsight/assets/TDinsight-4-requests.png deleted file mode 100644 index 27964215567f9f961c0aeaf1b863188437008fb7..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/TDinsight-4-requests.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp b/docs-en/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp new file mode 100644 index 0000000000000000000000000000000000000000..00afcce013602dce0da17bfd033f65aaa8e43bb7 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-5-database.png b/docs-en/14-reference/07-tdinsight/assets/TDinsight-5-database.png deleted file mode 100644 index b0d3abbf21ec4d4bd7bfb95fcc03a5f936b22665..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/TDinsight-5-database.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-5-database.webp b/docs-en/14-reference/07-tdinsight/assets/TDinsight-5-database.webp new file mode 100644 index 0000000000000000000000000000000000000000..567e5694f9d7a035a3eb354493d3df8ed64db251 Binary 
files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/TDinsight-5-database.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.png b/docs-en/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.png deleted file mode 100644 index 2b54cbeb83bcff12f20461a4f57f882e2073f231..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.webp b/docs-en/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.webp new file mode 100644 index 0000000000000000000000000000000000000000..cc8a912810f35e53a6e5fa96ea0c81e334ffc0df Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/TDinsight-6-dnode-usage.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-7-login-history.png b/docs-en/14-reference/07-tdinsight/assets/TDinsight-7-login-history.png deleted file mode 100644 index eb3848657f13900c856ac595c20766465157e9c4..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/TDinsight-7-login-history.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-7-login-history.webp b/docs-en/14-reference/07-tdinsight/assets/TDinsight-7-login-history.webp new file mode 100644 index 0000000000000000000000000000000000000000..651b716bc511ba2ed5db5e6fc6b0591ef150cbf6 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/TDinsight-7-login-history.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-8-taosadaper.png b/docs-en/14-reference/07-tdinsight/assets/TDinsight-8-taosadaper.png deleted file mode 100644 index d94b2e02ac9855bb3d2f77d8902e068839db364f..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/TDinsight-8-taosadaper.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp b/docs-en/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp new file mode 100644 index 0000000000000000000000000000000000000000..8666193f59497180574fd2786266e5baabbe9761 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-full.png b/docs-en/14-reference/07-tdinsight/assets/TDinsight-full.png deleted file mode 100644 index 654df2934597ce600a1dc2dcd0cab7e29de7076d..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/TDinsight-full.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/TDinsight-full.webp b/docs-en/14-reference/07-tdinsight/assets/TDinsight-full.webp new file mode 100644 index 0000000000000000000000000000000000000000..7f38a76a2b899ffebc7aecd39c8ec4fd0b2da778 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/TDinsight-full.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-manager-status.png b/docs-en/14-reference/07-tdinsight/assets/alert-manager-status.png deleted file mode 100644 index e3afa22c0326d70567ec4529c83101c746daac87..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/alert-manager-status.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-manager-status.webp b/docs-en/14-reference/07-tdinsight/assets/alert-manager-status.webp new file mode 
100644 index 0000000000000000000000000000000000000000..3d7fe932a23f3720e76e4217a7b5d1868d81fac8 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/alert-manager-status.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-notification-channel.png b/docs-en/14-reference/07-tdinsight/assets/alert-notification-channel.png deleted file mode 100644 index 198bf37141c86a66cdd91b47a331bcdeb83daaf8..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/alert-notification-channel.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-notification-channel.webp b/docs-en/14-reference/07-tdinsight/assets/alert-notification-channel.webp new file mode 100644 index 0000000000000000000000000000000000000000..517123954efe4b94485fdab2e07be0d765f5daa2 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/alert-notification-channel.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-query-demo.png b/docs-en/14-reference/07-tdinsight/assets/alert-query-demo.png deleted file mode 100644 index ace3aa3c2f8f14fabdac54bc25ae2d9449445b69..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/alert-query-demo.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-query-demo.webp b/docs-en/14-reference/07-tdinsight/assets/alert-query-demo.webp new file mode 100644 index 0000000000000000000000000000000000000000..6666296ac16e7a0c0ab3db23f0517f2089d09035 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/alert-query-demo.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.png b/docs-en/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.png deleted file mode 100644 index 7082e49f6beb8690c36f98a3f4ff2befdb8fd014..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.webp b/docs-en/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.webp new file mode 100644 index 0000000000000000000000000000000000000000..6f74bc3a47a32de661ef25f787a947d823715810 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/alert-rule-condition-notifications.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-rule-test.png b/docs-en/14-reference/07-tdinsight/assets/alert-rule-test.png deleted file mode 100644 index ffd4911b53854c42dbf0ff11838cb604fa694138..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/alert-rule-test.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/alert-rule-test.webp b/docs-en/14-reference/07-tdinsight/assets/alert-rule-test.webp new file mode 100644 index 0000000000000000000000000000000000000000..acda3b24a6263815ac8b658709d2172300ca3b00 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/alert-rule-test.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-button.png b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-button.png deleted file mode 100644 index 802c7366f921301bd7fbc62458e56b2d1eaf195c..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-button.png and /dev/null differ diff 
--git a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-button.webp b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-button.webp new file mode 100644 index 0000000000000000000000000000000000000000..903e236e2a776dfef7f85c014662e8913a9033a5 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-button.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.png b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.png deleted file mode 100644 index 019ec921b6f808671f4f864ddf3380159d4a0dcc..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.webp b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.webp new file mode 100644 index 0000000000000000000000000000000000000000..14fcfe9d183e8804199708ae4492d0904a7c9d62 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-tdengine.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-test.png b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-test.png deleted file mode 100644 index 3963abb4ea8ae0e6f5557466f7a5b746c2d2ea3c..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-test.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-test.webp b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-test.webp new file mode 100644 index 0000000000000000000000000000000000000000..00b50cc619b030d1fb2be3a367183901d5c833e8 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource-test.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource.png b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource.png deleted file mode 100644 index 837100464b35a5cafac474723aef603f91945ebc..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource.webp b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource.webp new file mode 100644 index 0000000000000000000000000000000000000000..06d0ff6ed50091a6340508bc5b2b3f78b65dcb18 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/howto-add-datasource.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-display.png b/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-display.png deleted file mode 100644 index 98223df25499effac343ff5723544a3c289f18fa..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-display.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-display.webp b/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-display.webp new file mode 100644 index 0000000000000000000000000000000000000000..e2ec052b91e439a817f6e88b8afd0fcb4dcb7ef8 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-display.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-import-options.png b/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-import-options.png deleted 
file mode 100644 index 07aba348f02b4fb8ef68e79664920c119b842d4c..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-import-options.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-import-options.webp b/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-import-options.webp new file mode 100644 index 0000000000000000000000000000000000000000..665c035f9755b9472aee33cd61d3ab52831194b5 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/howto-dashboard-import-options.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-import-dashboard.png b/docs-en/14-reference/07-tdinsight/assets/howto-import-dashboard.png deleted file mode 100644 index 7e28939ead8bf3b6e2b4330e4f9b59c2e39b5c1c..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/howto-import-dashboard.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/howto-import-dashboard.webp b/docs-en/14-reference/07-tdinsight/assets/howto-import-dashboard.webp new file mode 100644 index 0000000000000000000000000000000000000000..7dc42eeba919fee7b438a453c00bb9fd0ac2d274 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/howto-import-dashboard.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/import-dashboard-15167.png b/docs-en/14-reference/07-tdinsight/assets/import-dashboard-15167.png deleted file mode 100644 index 981f640b14d18aa6f0682768d8405a232df500f6..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/import-dashboard-15167.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/import-dashboard-15167.webp b/docs-en/14-reference/07-tdinsight/assets/import-dashboard-15167.webp new file mode 100644 index 0000000000000000000000000000000000000000..7ef081900f8de99c859193b69d49b3d6bc187909 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/import-dashboard-15167.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.png b/docs-en/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.png deleted file mode 100644 index 94ef4fa5fe63e535118a81707b413c028ce01f70..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.webp b/docs-en/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.webp new file mode 100644 index 0000000000000000000000000000000000000000..602452fc4c89424d8e17d46d74949b69be84dbe8 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/import-dashboard-for-tdengine.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.png b/docs-en/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.png deleted file mode 100644 index 670cacc377c2801fa9437c3c132c5c7fbc361b0f..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.webp b/docs-en/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.webp new file mode 100644 index 0000000000000000000000000000000000000000..35a3ebba781f24dbb0066993d1ca2f02659997d2 Binary files /dev/null and 
b/docs-en/14-reference/07-tdinsight/assets/import-via-grafana-dot-com.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/import_dashboard.png b/docs-en/14-reference/07-tdinsight/assets/import_dashboard.png deleted file mode 100644 index d74cd36c96ee0fd24ddc6feae2da07824816f745..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/import_dashboard.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/import_dashboard.webp b/docs-en/14-reference/07-tdinsight/assets/import_dashboard.webp new file mode 100644 index 0000000000000000000000000000000000000000..fb7958f1b9fbd43c8f63136024842790e711c490 Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/import_dashboard.webp differ diff --git a/docs-en/14-reference/07-tdinsight/assets/tdengine_dashboard.png b/docs-en/14-reference/07-tdinsight/assets/tdengine_dashboard.png deleted file mode 100644 index 0101e7430cb2ef673818de8bd3af53d0d082ad3f..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/07-tdinsight/assets/tdengine_dashboard.png and /dev/null differ diff --git a/docs-en/14-reference/07-tdinsight/assets/tdengine_dashboard.webp b/docs-en/14-reference/07-tdinsight/assets/tdengine_dashboard.webp new file mode 100644 index 0000000000000000000000000000000000000000..49f1d88f4ad93286cd8582536e82b4dcc4ff271b Binary files /dev/null and b/docs-en/14-reference/07-tdinsight/assets/tdengine_dashboard.webp differ diff --git a/docs-en/14-reference/07-tdinsight/index.md b/docs-en/14-reference/07-tdinsight/index.md index 553ae48b285f39a1fb29ebe946cb9b949adf9664..cebfafa225e6e8de75ff84bb51fa664784177910 100644 --- a/docs-en/14-reference/07-tdinsight/index.md +++ b/docs-en/14-reference/07-tdinsight/index.md @@ -1,23 +1,23 @@ --- -title: TDinsight - 基于Grafana的TDengine零依赖监控解决方案 +title: TDinsight - Grafana-based Zero-Dependency Monitoring Solution for TDengine sidebar_label: TDinsight --- -TDinsight 是使用 [TDengine] 原生监控数据库和 [Grafana] 对 TDengine 进行监控的解决方案。 +TDinsight is a solution for monitoring TDengine using the builtin native monitoring database and [Grafana]. -TDengine 启动后,会自动创建一个监测数据库 log,并自动将服务器的 CPU、内存、硬盘空间、带宽、请求数、磁盘读写速度、慢查询等信息定时写入该数据库,并对重要的系统操作(比如登录、创建、删除数据库等)以及各种错误报警信息进行记录。通过 [Grafana] 和 [TDengine 数据源插件](https://github.com/taosdata/grafanaplugin/releases),TDinsight 将集群状态、节点信息、插入及查询请求、资源使用情况等进行可视化展示,同时还支持 vnode、dnode、mnode 节点状态异常告警,为开发者实时监控 TDengine 集群运行状态提供了便利。本文将指导用户安装 Grafana 服务器并通过 `TDinsight.sh` 安装脚本自动安装 TDengine 数据源插件及部署 TDinsight 可视化面板。 +After TDengine starts, it will automatically create a monitoring database `log`. TDengine will automatically write many metrics in specific intervals into the `log` database. The metrics may include the server's CPU, memory, hard disk space, network bandwidth, number of requests, disk read/write speed, slow queries, other information like important system operations (user login, database creation, database deletion, etc.), and error alarms. With [Grafana] and [TDengine Data Source Plugin](https://github.com/taosdata/grafanaplugin/releases), TDinsight can visualize cluster status, node information, insertion and query requests, resource usage, vnode, dnode, and mnode status, exception alerts and many other metrics. This is very convenient for developers who want to monitor TDengine cluster status in real-time. 
This article will guide users to install the Grafana server, automatically install the TDengine data source plug-in, and deploy the TDinsight visualization panel using the `TDinsight.sh` installation script.

-## 系统要求

+## System Requirements

-要部署 TDinsight,需要一个单节点的 TDengine 服务器或一个多节点的 [TDengine] 集群,以及一个[Grafana]服务器。此仪表盘需要 TDengine 2.3.3.0 及以上,并启用 `log` 数据库(`monitor = 1`)。

+To deploy TDinsight, a single-node TDengine server or a multi-node TDengine cluster and a [Grafana] server are required. This dashboard requires TDengine 2.3.3.0 or above, with the `log` database enabled (`monitor = 1`).

-## 安装 Grafana

+## Installing Grafana

-我们建议在此处使用最新的[Grafana] 7 或 8 版本。您可以在任何[支持的操作系统](https://grafana.com/docs/grafana/latest/installation/requirements/#supported-operating-systems)中,按照 [Grafana 官方文档安装说明](https://grafana.com/docs/grafana/latest/installation/) 安装 [Grafana]。

+We recommend using the latest [Grafana] version 7 or 8. You can install [Grafana] on any [supported operating system](https://grafana.com/docs/grafana/latest/installation/requirements/#supported-operating-systems) by following the [official Grafana installation instructions](https://grafana.com/docs/grafana/latest/installation/).

-### 在 Debian 或 Ubuntu 上安装 Grafana

+### Installing Grafana on Debian or Ubuntu

-对于 Debian 或 Ubuntu 操作系统,建议使用 Grafana 镜像仓库。使用如下命令从零开始安装:

+For Debian or Ubuntu operating systems, we recommend using the Grafana package repository. Install from scratch with the following commands:

```bash
sudo apt-get install -y apt-transport-https
@@ -30,9 +30,9 @@ sudo apt-get update
sudo apt-get install grafana
```

-### 在 CentOS / RHEL 上安装 Grafana

+### Installing Grafana on CentOS / RHEL

-您可以从官方 YUM 镜像仓库安装。

+You can install it from its official YUM repository.

```bash
sudo tee /etc/yum.repos.d/grafana.repo << EOF
@@ -49,7 +49,7 @@ EOF
sudo yum install grafana
```

-或者用 RPM 安装:

+Or install it with the RPM package:

```bash
wget https://dl.grafana.com/oss/release/grafana-7.5.11-1.x86_64.rpm
@@ -59,30 +59,31 @@ sudo yum install \
https://dl.grafana.com/oss/release/grafana-7.5.11-1.x86_64.rpm
```

-## 自动部署 TDinsight

+## Automated Deployment of TDinsight

-我们提供了一个自动化安装脚本 [`TDinsight.sh`](https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh) 脚本以便用户快速进行安装配置。

+We provide an installation script [`TDinsight.sh`](https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh) to allow users to configure the installation automatically and quickly.

-您可以通过 `wget` 或其他工具下载该脚本:

+You can download the script via `wget` or other tools:

```bash
wget https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh
chmod +x TDinsight.sh
+./TDinsight.sh
```

-这个脚本会自动下载最新的[Grafana TDengine 数据源插件](https://github.com/taosdata/grafanaplugin/releases/latest) 和 [TDinsight 仪表盘](https://grafana.com/grafana/dashboards/15167) ,将命令行选项中的可配置参数转为 [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) 配置文件,以进行自动化部署及更新等操作。利用该脚本提供的告警设置选项,你还可以获得内置的阿里云短信告警通知支持。

+This script will automatically download the latest [Grafana TDengine data source plugin](https://github.com/taosdata/grafanaplugin/releases/latest) and the [TDinsight dashboard](https://grafana.com/grafana/dashboards/15167), and convert the configurable parameters given as command-line options into [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) configuration files to automate deployment and updates.
With the alert setting options provided by this script, you can also get built-in support for Alibaba Cloud SMS alert notifications.

-假设您在同一台主机上使用 TDengine 和 Grafana 的默认服务。运行 `./TDinsight.sh` 并打开 Grafana 浏览器窗口就可以看到 TDinsight 仪表盘了。

+Assume TDengine and Grafana are running with their default settings on the same host. Run `./TDinsight.sh` and open Grafana in a browser window to see the TDinsight dashboard.

-下面是 TDinsight.sh 的用法说明:

+The usage of TDinsight.sh is described below.

-```bash
+```text
Usage:
   ./TDinsight.sh
   ./TDinsight.sh -h|--help
   ./TDinsight.sh -n -a -u -p

-Install and configure TDinsight dashboard in Grafana on ubuntu 18.04/20.04 system.
+Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 system.

-h, -help, --help Display help

-P, --grafana-provisioning-dir Grafana provisioning directory, [default: /etc/grafana/provisioning/]
-G, --grafana-plugins-dir Grafana plugins directory, [default: /var/lib/grafana/plugins]
--O, --grafana-org-id Grafana orgnization id. [default: 1]
+-O, --grafana-org-id Grafana organization id. [default: 1]
-n, --tdengine-ds-name TDengine datasource name, no space. [default: TDengine]
-a, --tdengine-api TDengine REST API endpoint. [default: http://127.0.0.1:6041]
-u, --tdengine-user TDengine user name. [default: root]
-p, --tdengine-password TDengine password. [default: taosdata]
--i, --tdinsight-uid Replace with a non-space ascii code as the dashboard id. [default: tdinsight]
+-i, --tdinsight-uid Replace with a non-space ASCII code as the dashboard id. [default: tdinsight]
-t, --tdinsight-title Dashboard title. [default: TDinsight]
-e, --tdinsight-editable If the provisioning dashboard could be editable. [default: false]
-E, --external-notifier Apply external notifier uid to TDinsight dashboard.

-Aliyun SMS as Notifier:
--s, --sms-enabled To enable tdengine-datasource plugin builtin aliyun sms webhook.
+Alibaba Cloud SMS as Notifier:
+-s, --sms-enabled To enable tdengine-datasource plugin builtin Alibaba Cloud SMS webhook.
-N, --sms-notifier-name Provisioning notifier name. [default: TDinsight Builtin SMS]
-U, --sms-notifier-uid Provisioning notifier uid, use lowercase notifier name by default.
-D, --sms-notifier-is-default Set notifier as default.
--I, --sms-access-key-id Aliyun sms access key id
--K, --sms-access-key-secret Aliyun sms access key secret
+-I, --sms-access-key-id Alibaba Cloud SMS access key id
+-K, --sms-access-key-secret Alibaba Cloud SMS access key secret
-S, --sms-sign-name Sign name
-C, --sms-template-code Template code
--T, --sms-template-param Template param, a escaped json string like '{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}'
+-T, --sms-template-param Template param, an escaped JSON string like '{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}'
-B, --sms-phone-numbers Comma-separated numbers list, eg "189xxxxxxxx,132xxxxxxxx"
-L, --sms-listen-addr [default: 127.0.0.1:9100]
```

-大多数命令行选项都可以通过环境变量获得同样的效果。
-
-| 短选项 | 长选项 | 环境变量 | 说明 |
-| ------ | -------------------------- | ---------------------------- | --------------------------------------------------------------------------- |
-| -v | --plugin-version | TDENGINE_PLUGIN_VERSION | TDengine 数据源插件版本,默认使用最新版。 |
-| -P | --grafana-provisioning-dir | GF_PROVISIONING_DIR | Grafana 配置目录,默认为`/etc/grafana/provisioning/` |
-| -G | --grafana-plugins-dir | GF_PLUGINS_DIR | Grafana 插件目录,默认为`/var/lib/grafana/plugins`。 |
-| -O | --grafana-org-id | GF_ORG_ID | Grafana 组织 ID,默认为 1。 |
-| -n | --tdengine-ds-name | TDENGINE_DS_NAME | TDengine 数据源名称,默认为 TDengine。 |
-| -a | --tdengine-api | TDENGINE_API | TDengine REST API 端点。默认为`http://127.0.0.1:6041`。 |
-| -u | --tdengine-user | TDENGINE_USER | TDengine 用户名。 [默认值:root] |
-| -p | --tdengine-密码 | TDENGINE_PASSWORD | TDengine 密码。 [默认:taosdata] |
-| -i | --tdinsight-uid | TDINSIGHT_DASHBOARD_UID | TDinsight 仪表盘`uid`。 [默认值:tdinsight] |
-| -t | --tdinsight-title | TDINSIGHT_DASHBOARD_TITLE | TDinsight 仪表盘标题。 [默认:TDinsight] |
-| -e | --tdinsight-可编辑 | TDINSIGHT_DASHBOARD_EDITABLE | 如果配置仪表盘可以编辑。 [默认值:false] |
-| -E | --external-notifier | EXTERNAL_NOTIFIER | 将外部通知程序 uid 应用于 TDinsight 仪表盘。 |
-| -s | --sms-enabled | SMS_ENABLED | 启用阿里云短信 webhook 内置的 tdengine-datasource 插件。 |
-| -N | --sms-notifier-name | SMS_NOTIFIER_NAME | 供应通知程序名称。[默认:`TDinsight Builtin SMS`] |
-| -U | --sms-notifier-uid | SMS_NOTIFIER_UID | "Notification Channel" `uid`,默认使用程序名称的小写,其他字符用 “-” 代替。 |
-| -D | --sms-notifier-is-default | SMS_NOTIFIER_IS_DEFAULT | 将内置短信通知设置为默认值。 |
-| -I | --sms-access-key-id | SMS_ACCESS_KEY_ID | 阿里云短信访问密钥 id |
-| -K | --sms-access-key-secret | SMS_ACCESS_KEY_SECRET | 阿里云短信访问秘钥 |
-| -S | --sms-sign-name | SMS_SIGN_NAME | 签名 |
-| -C | --sms-template-code | SMS_TEMPLATE_CODE | 模板代码 |
-| -T | --sms-template-param | SMS_TEMPLATE_PARAM | 模板参数的 JSON 模板 |
-| -B | --sms-phone-numbers | SMS_PHONE_NUMBERS | 逗号分隔的手机号列表,例如`"189xxxxxxxx,132xxxxxxxx"` |
-| -L | --sms-listen-addr | SMS_LISTEN_ADDR | 内置 sms webhook 监听地址,默认为`127.0.0.1:9100` |
-
-假设您在主机 `tdengine` 上启动 TDengine 数据库,HTTP API 端口为 `6041`,用户为 `root1`,密码为 `pass5ord`。执行脚本:

+Most command-line options can equivalently be set via environment variables.
+
+| Short Option | Long Option | Environment Variable | Description |
+| ------------ | -------------------------- | ---------------------------- | --------------------------------------------------------------------------------- |
+| -v | --plugin-version | TDENGINE_PLUGIN_VERSION | The TDengine data source plugin version; the latest version is used by default. |
+| -P | --grafana-provisioning-dir | GF_PROVISIONING_DIR | The Grafana provisioning directory, defaults to `/etc/grafana/provisioning/` |
+| -G | --grafana-plugins-dir | GF_PLUGINS_DIR | The Grafana plugin directory, defaults to `/var/lib/grafana/plugins`. |
+| -O | --grafana-org-id | GF_ORG_ID | The Grafana organization ID, default is 1. |
+| -n | --tdengine-ds-name | TDENGINE_DS_NAME | The name of the TDengine data source, defaults to TDengine. |
+| -a | --tdengine-api | TDENGINE_API | The TDengine REST API endpoint, defaults to `http://127.0.0.1:6041`. |
+| -u | --tdengine-user | TDENGINE_USER | TDengine user name. [default: root] |
+| -p | --tdengine-password | TDENGINE_PASSWORD | TDengine password. [default: taosdata] |
+| -i | --tdinsight-uid | TDINSIGHT_DASHBOARD_UID | The `uid` of the TDinsight dashboard. [default: tdinsight] |
+| -t | --tdinsight-title | TDINSIGHT_DASHBOARD_TITLE | The TDinsight dashboard title. [default: TDinsight] |
+| -e | --tdinsight-editable | TDINSIGHT_DASHBOARD_EDITABLE | Whether the provisioned dashboard is editable. [default: false] |
+| -E | --external-notifier | EXTERNAL_NOTIFIER | Apply the external notifier uid to the TDinsight dashboard. |
+| -s | --sms-enabled | SMS_ENABLED | Enable the built-in Alibaba Cloud SMS webhook of the tdengine-datasource plugin. |
+| -N | --sms-notifier-name | SMS_NOTIFIER_NAME | The name of the provisioned notifier. [default: `TDinsight Builtin SMS`] |
+| -U | --sms-notifier-uid | SMS_NOTIFIER_UID | The "Notification Channel" `uid`; the lowercase notifier name is used by default, with other characters replaced by "-". |
+| -D | --sms-notifier-is-default | SMS_NOTIFIER_IS_DEFAULT | Set the built-in SMS notifier as the default notification channel. |
+| -I | --sms-access-key-id | SMS_ACCESS_KEY_ID | Alibaba Cloud SMS access key id |
+| -K | --sms-access-key-secret | SMS_ACCESS_KEY_SECRET | Alibaba Cloud SMS access key secret |
+| -S | --sms-sign-name | SMS_SIGN_NAME | Signature |
+| -C | --sms-template-code | SMS_TEMPLATE_CODE | Template code |
+| -T | --sms-template-param | SMS_TEMPLATE_PARAM | JSON template for the template parameters |
+| -B | --sms-phone-numbers | SMS_PHONE_NUMBERS | A comma-separated list of phone numbers, e.g. `"189xxxxxxxx,132xxxxxxxx"` |
+| -L | --sms-listen-addr | SMS_LISTEN_ADDR | The built-in SMS webhook listener address, defaults to `127.0.0.1:9100` |
+
+Suppose you start a TDengine database on host `tdengine` with HTTP API port `6041`, user `root1`, and password `pass5ord`. Execute the script:

```bash
sudo ./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord
```

-我们提供了一个“-E”选项,用于从命令行配置 TDinsight 使用现有的通知通道(Notification Channel)。假设你的 Grafana 用户和密码是 `admin:admin`,使用以下命令获取已有的通知通道的`uid`:

+We provide a "-E" option to configure TDinsight to use an existing Notification Channel from the command line. Assuming your Grafana username and password are `admin:admin`, use the following command to get the `uid` of an existing notification channel:

```bash
curl --no-progress-meter -u admin:admin http://localhost:3000/api/alert-notifications | jq
```

-使用上面获取的 `uid` 值作为 `-E` 输入。

+Use the `uid` value obtained above as the `-E` input:

```bash
sudo ./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
```

-如果你想使用[阿里云短信](https://www.aliyun.com/product/sms)服务作为通知渠道,你应该使用`-s`标志启用并添加以下参数:

+If you want to use the [Alibaba Cloud SMS](https://www.aliyun.com/product/sms) service as a notification channel, you should enable it with the `-s` flag and add the following parameters:
- `-N`:Notification Channel 名,默认为`TDinsight Builtin SMS`。
- `-U`:Channel uid,默认是 `name` 的小写,任何其他字符都替换为 - ,对于默认的 `-N`,其 uid 为 `tdinsight-builtin-sms`。
- `-I`:阿里云短信访问密钥 id。
- `-K`:阿里云短信访问秘钥。
- `-S`:阿里云短信签名。
- `-C`:阿里云短信模板 ID。
- `-T`:阿里云短信模板参数,为 JSON 格式模板,示例如下 `'{"alarm_level":"%s","time":"%s","name":"%s","content":"%s "}'`。有四个参数:告警级别、时间、名称和告警内容。
- `-B`:电话号码列表,以逗号`,`分隔。

+- `-N`: Notification Channel name, default is `TDinsight Builtin SMS`.
+- `-U`: Channel uid, default is the lowercase of `name` with any other character replaced by `-`; for the default `-N`, its uid is `tdinsight-builtin-sms`.
+- `-I`: Alibaba Cloud SMS access key id.
+- `-K`: Alibaba Cloud SMS access key secret.
+- `-S`: Alibaba Cloud SMS signature.
+- `-C`: Alibaba Cloud SMS template id.
+- `-T`: Alibaba Cloud SMS template parameters, a JSON-format template, for example `'{"alarm_level":"%s", "time":"%s", "name":"%s", "content":"%s"}'`. There are four parameters: alarm level, time, name, and alarm content.
+- `-B`: a list of phone numbers, separated by commas.

-如果要监控多个 TDengine 集群,则需要设置多个 TDinsight 仪表盘。设置非默认 TDinsight 需要进行一些更改: `-n` `-i` `-t` 选项需要更改为非默认名称,如果使用 内置短信告警功能,`-N` 和 `-L` 也应该改变。

+If you want to monitor multiple TDengine clusters, you need to set up multiple TDinsight dashboards. Setting up a non-default TDinsight requires some changes: the `-n` `-i` `-t` options need to be changed to non-default names, and `-N` and `-L` should also be changed if the built-in SMS alerting feature is used.

```bash
-sudo ./TDengine.sh -n TDengine-Env1 -a http://another:6041 -u root -p taosdata -i tdinsight-env1 -t 'TDinsight Env1'
-# 如果使用内置短信通知
-sudo ./TDengine.sh -n TDengine-Env1 -a http://another:6041 -u root -p taosdata -i tdinsight-env1 -t 'TDinsight Env1' \
+sudo ./TDengine.sh -n TDengine-Env1 -a http://another:6041 -u root -p taosdata -i tdinsight-env1 -t 'TDinsight Env1'
+# If using built-in SMS notifications
+sudo ./TDengine.sh -n TDengine-Env1 -a http://another:6041 -u root -p taosdata -i tdinsight-env1 -t 'TDinsight Env1' \
 -s -N 'Env1 SMS' -I xx -K xx -S xx -C SMS_XX -T '' -B 00000000000 -L 127.0.0.01:10611
```

-请注意,配置数据源、通知 Channel 和仪表盘在前端是不可更改的。您应该再次通过此脚本更新配置或手动更改 `/etc/grafana/provisioning` 目录(这是 Grafana 的默认目录,根据需要使用`-P`选项更改)中的配置文件。

+Please note that the provisioned data source, notification channel, and dashboard cannot be changed on the front end. You should update the configuration again via this script, or manually change the configuration files in the `/etc/grafana/provisioning` directory (this is the default directory for Grafana; use the `-P` option to change it as needed).

-特别地,当您使用 Grafana Cloud 或其他组织时,`-O` 可用于设置组织 ID。 `-G` 可指定 Grafana 插件安装目录。 `-e` 参数将仪表盘设置为可编辑。

+Specifically, `-O` can be used to set the organization ID when you are using Grafana Cloud or another organization. `-G` specifies the Grafana plugin installation directory. The `-e` parameter sets the dashboard to be editable.

-## 手动设置 TDinsight

+## Set up TDinsight manually

-### 安装 TDengine 数据源插件

+### Install the TDengine data source plugin

-从 GitHub 安装 TDengine 最新版数据源插件。

+Install the latest version of the TDengine data source plugin from GitHub:

```bash
get_latest_release() {
@@ -207,196 +208,196 @@ sudo grafana-cli \
 plugins install tdengine-datasource
```

-### 配置 Grafana
-
-将以下设置添加到配置文件 `/etc/grafana/grafana.ini`,以启用未签名插件。
+:::note
+Plugin versions 3.1.6 and earlier require the following setting in the configuration file `/etc/grafana/grafana.ini` to enable unsigned plugins.
```ini
[plugins]
allow_loading_unsigned_plugins = tdengine-datasource
```
+:::

-### 启动 Grafana 服务

+### Start the Grafana service

```bash
sudo systemctl start grafana-server
sudo systemctl enable grafana-server
```

-### 登录到 Grafana

+### Logging into Grafana

-在 Web 浏览器中打开默认的 Grafana 网址:`http://localhost:3000`。
-默认用户名/密码都是 `admin`。Grafana 会要求在首次登录后更改密码。
+Open the default Grafana URL in a web browser: `http://localhost:3000`.
+The default username/password is `admin`. Grafana will require a password change after the first login.

-### 添加 TDengine 数据源

+### Adding a TDengine Data Source

-指向 **Configurations** -> **Data Sources** 菜单,然后点击 **Add data source** 按钮。

+Point to the **Configurations** -> **Data Sources** menu, and click the **Add data source** button.

-![添加数据源按钮](./assets/howto-add-datasource-button.png)
+![TDengine Database TDinsight Add data source button](./assets/howto-add-datasource-button.webp)

-搜索并选择**TDengine**。

+Search for and select **TDengine**.

-![添加数据源](./assets/howto-add-datasource-tdengine.png)
+![TDengine Database TDinsight Add datasource](./assets/howto-add-datasource-tdengine.webp)

-配置 TDengine 数据源。

+Configure the TDengine datasource.

-![数据源配置](./assets/howto-add-datasource.png)
+![TDengine Database TDinsight Datasource Configuration](./assets/howto-add-datasource.webp)

-保存并测试,正常情况下会报告 'TDengine Data source is working'。

+Save and test. It will report 'TDengine Data source is working' under normal circumstances.

-![数据源测试](./assets/howto-add-datasource-test.png)
+![TDengine Database TDinsight datasource test](./assets/howto-add-datasource-test.webp)

-### 导入仪表盘

+### Importing dashboards

-指向 **+** / **Create** - **import**(或 `/dashboard/import` url)。

+Point to **+** / **Create** - **import** (or the `/dashboard/import` URL).

-![导入仪表盘和配置](./assets/import_dashboard.png)
+![TDengine Database TDinsight Import Dashboard and Configuration](./assets/import_dashboard.webp)

-在 **Import via grafana.com** 位置键入仪表盘 ID `15167` 并 **Load**。

+Type the dashboard ID `15167` in the **Import via grafana.com** field and click **Load**.

-![通过 grafana.com 导入](./assets/import-dashboard-15167.png)
+![TDengine Database TDinsight Import via grafana.com](./assets/import-dashboard-15167.webp)

-导入完成后,TDinsight 的完整页面视图如下所示。

+Once the import is complete, the full page view of TDinsight is shown below.

-![显示](./assets/TDinsight-full.png)
+![TDengine Database TDinsight show](./assets/TDinsight-full.webp)

-## TDinsight 仪表盘详细信息

+## TDinsight dashboard details

-TDinsight 仪表盘旨在提供 TDengine 相关资源使用情况[dnodes, mdodes, vnodes](https://www.taosdata.com/cn/documentation/architecture#cluster)或数据库的使用情况和状态。

+The TDinsight dashboard is designed to provide the usage and status of TDengine-related resources: [dnodes, mnodes, vnodes](https://www.taosdata.com/cn/documentation/architecture#cluster) and databases.

-指标详情如下:

+Details of the metrics are as follows.

-### 集群状态

+### Cluster Status

-![tdinsight-mnodes-overview](./assets/TDinsight-1-cluster-status.png)
+![TDengine Database TDinsight cluster status](./assets/TDinsight-1-cluster-status.webp)

-这部分包括集群当前信息和状态,告警信息也在此处(从左到右,从上到下)。

+This section contains the current information and status of the cluster; the alert information is also shown here (from left to right, top to bottom).
- **First EP**:当前 TDengine 集群中的`firstEp`设置。
- **Version**:TDengine 服务器版本(master mnode)。
- **Master Uptime**: 当前 Master MNode 被选举为 Master 后经过的时间。
- **Expire Time** - 企业版过期时间。
- **Used Measuring Points** - 企业版已使用的测点数。
- **Databases** - 数据库个数。
- **Connections** - 当前连接个数。
- **DNodes/MNodes/VGroups/VNodes**:每种资源的总数和存活数。
- **DNodes/MNodes/VGroups/VNodes Alive Percent**:每种资源的存活数/总数的比例,启用告警规则,并在资源存活率(1 分钟内平均健康资源比例)不足 100%时触发。
- **Messuring Points Used**:启用告警规则的测点数用量(社区版无数据,默认情况下是健康的)。
- **Grants Expire Time**:启用告警规则的企业版过期时间(社区版无数据,默认情况是健康的)。
- **Error Rate**:启用警报的集群总合错误率(每秒平均错误数)。
- **Variables**:`show variables` 表格展示。

+- **First EP**: the `firstEp` setting in the current TDengine cluster.
+- **Version**: TDengine server version (master mnode).
+- **Master Uptime**: the time elapsed since the current Master MNode was elected as Master.
+- **Expire Time**: Enterprise Edition expiration time.
+- **Used Measuring Points**: the number of measuring points used by the Enterprise Edition.
+- **Databases**: the number of databases.
+- **Connections**: the number of current connections.
+- **DNodes/MNodes/VGroups/VNodes**: the total count and the alive count of each resource.
+- **DNodes/MNodes/VGroups/VNodes Alive Percent**: the alive-to-total ratio of each resource; an alert rule is enabled and triggers when the resource liveness rate (the average proportion of healthy resources within 1 minute) is less than 100%.
+- **Measuring Points Used**: the number of measuring points used, with an alert rule enabled (no data in the Community Edition; healthy by default).
+- **Grants Expire Time**: the Enterprise Edition expiration time, with an alert rule enabled (no data in the Community Edition; healthy by default).
+- **Error Rate**: the cluster-wide aggregate error rate (average number of errors per second), with alerting enabled.
+- **Variables**: `show variables` table display.

-### DNodes 状态

+### DNodes Status

-![tdinsight-mnodes-overview](./assets/TDinsight-2-dnodes.png)
+![TDengine Database TDinsight dnodes overview](./assets/TDinsight-2-dnodes.webp)

- **DNodes Status**:`show dnodes` 的简单表格视图。
- **DNodes Lifetime**:从创建 dnode 开始经过的时间。
- **DNodes Number**:DNodes 数量变化。
- **Offline Reason**:如果有任何 dnode 状态为离线,则以饼图形式展示离线原因。

+- **DNodes Status**: a simple table view of `show dnodes`.
+- **DNodes Lifetime**: the time elapsed since the dnode was created.
+- **DNodes Number**: the change in the number of DNodes over time.
+- **Offline Reason**: if any dnode is offline, the reasons are shown as a pie chart.

-### MNode 概述

+### MNode Overview

-![tdinsight-mnodes-overview](./assets/TDinsight-3-mnodes.png)
+![TDengine Database TDinsight mnodes overview](./assets/TDinsight-3-mnodes.webp)

-1. **MNodes Status**:`show mnodes` 的简单表格视图。
-2. **MNodes Number**:类似于`DNodes Number`,MNodes 数量变化。

+1. **MNodes Status**: a simple table view of `show mnodes`.
+2. **MNodes Number**: similar to `DNodes Number`, the change in the number of MNodes over time.

-### 请求

+### Request

-![tdinsight-requests](./assets/TDinsight-4-requests.png)
+![TDengine Database TDinsight requests](./assets/TDinsight-4-requests.webp)

-1. **Requests Rate(Inserts per Second)**:平均每秒插入次数。
-2. **Requests (Selects)**:查询请求数及变化率(count of second)。
-3. **Requests (HTTP)**:HTTP 请求数和请求速率(count of second)。

+1. **Requests Rate(Inserts per Second)**: average number of inserts per second.
+2. **Requests (Selects)**: number of query requests and their change rate (count per second).
+3. **Requests (HTTP)**: number of HTTP requests and the request rate (count per second).

-### 数据库

+### Database

-![tdinsight-database](./assets/TDinsight-5-database.png)
+![TDengine Database TDinsight database](./assets/TDinsight-5-database.webp)

-数据库使用情况,对变量 `$database` 的每个值即每个数据库进行重复多行展示。

+Database usage, repeated for each value of the variable `$database`, i.e., one set of rows per database.

-1. **STables**:超级表数量。
-2. **Total Tables**:所有表数量。
-3. **Sub Tables**:所有超级表子表的数量。
-4. **Tables**:所有普通表数量随时间变化图。
-5. **Tables Number Foreach VGroups**:每个 VGroups 包含的表数量。

+1. **STables**: the number of super tables.
+2. **Total Tables**: the number of all tables.
+3. **Sub Tables**: the number of subtables of all super tables.
+4. **Tables**: a graph of the number of normal tables over time.
+5. **Tables Number Foreach VGroups**: the number of tables contained in each VGroup.

-### DNode 资源使用情况

+### DNode Resource Usage

-![dnode-usage](./assets/TDinsight-6-dnode-usage.png)
+![TDengine Database TDinsight dnode usage](./assets/TDinsight-6-dnode-usage.webp)

-数据节点资源使用情况展示,对变量 `$fqdn` 即每个数据节点进行重复多行展示。包括:

+Data node resource usage, repeated for each value of the variable `$fqdn`, i.e., each data node. It includes:

-1. **Uptime**:从创建 dnode 开始经过的时间。
-2. **Has MNodes?**:当前 dnode 是否为 mnode。
-3. **CPU Cores**:CPU 核数。
-4. **VNodes Number**:当前 dnode 的 VNodes 数量。
-5. **VNodes Masters**:处于 master 角色的 vnode 数量。
-6. **Current CPU Usage of taosd**:taosd 进程的 CPU 使用率。
-7. **Current Memory Usage of taosd**:taosd 进程的内存使用情况。
-8. **Disk Used**:taosd 数据目录的总磁盘使用百分比。
-9. **CPU Usage**:进程和系统 CPU 使用率。
-10. **RAM Usage**:RAM 使用指标时间序列视图。
-11. **Disk Used**:多级存储下每个级别使用的磁盘(默认为 level0 级)。
-12. **Disk Increasing Rate per Minute**:每分钟磁盘用量增加或减少的百分比。
-13. **Disk IO**:磁盘 IO 速率。
-14. **Net IO**:网络 IO,除本机网络之外的总合网络 IO 速率。

+1. **Uptime**: the time elapsed since the dnode was created.
+2. **Has MNodes?**: whether the current dnode is a mnode.
+3. **CPU Cores**: the number of CPU cores.
+4. **VNodes Number**: the number of VNodes on the current dnode.
+5. **VNodes Masters**: the number of vnodes in the master role.
+6. **Current CPU Usage of taosd**: CPU usage rate of the taosd process.
+7. **Current Memory Usage of taosd**: memory usage of the taosd process.
+8. **Disk Used**: the total disk usage percentage of the taosd data directory.
+9. **CPU Usage**: process and system CPU usage.
+10. **RAM Usage**: time series view of RAM usage metrics.
+11. **Disk Used**: disk used at each level of multi-level storage (default is level0).
+12. **Disk Increasing Rate per Minute**: percentage increase or decrease in disk usage per minute.
+13. **Disk IO**: disk IO rate.
+14. **Net IO**: network IO, the aggregate network IO rate excluding the local network.

-### 登录历史

+### Login History

-![登录历史](./assets/TDinsight-7-login-history.png)
+![TDengine Database TDinsight login history](./assets/TDinsight-7-login-history.webp)

-目前只报告每分钟登录次数。

+Currently, only the number of logins per minute is reported.

-### TaosAdapter

+### Monitoring taosAdapter

-![taosadapter](./assets/TDinsight-8-taosadaper.png)
+![TDengine Database TDinsight monitor taosadapter](./assets/TDinsight-8-taosadapter.webp)

-包含 taosAdapter 请求统计和状态详情。包括:

+taosAdapter request statistics and status details are monitored here. This includes:

-1. **http_request**: 包含总请求数,请求失败数以及正在处理的请求数
-2. **top 3 request endpoint**: 按终端分组,请求排名前三的数据
-3. **Memory Used**: taosAdapter 内存使用情况
-4. **latency_quantile(ms)**: (1, 2, 5, 9, 99)阶段的分位数
-5. **top 3 failed request endpoint**: 按终端分组,请求失败排名前三的数据
-6. **CPU Used**: taosAdapter cpu 使用情况
+1. **http_request**: contains the total number of requests, the number of failed requests, and the number of requests being processed
+2. **top 3 request endpoint**: data of the top 3 requests, grouped by endpoint
+3. **Memory Used**: taosAdapter memory usage
+4. **latency_quantile(ms)**: latency quantiles for the (1, 2, 5, 9, 99) stages
+5. **top 3 failed request endpoint**: data of the top 3 failed requests, grouped by endpoint
+6. **CPU Used**: taosAdapter CPU usage

-## 升级

+## Upgrade

-通过 `TDinsight.sh` 脚本安装的 TDinsight,可以通过重新运行该脚本就可以升级到最新的 Grafana 插件和 TDinsight Dashboard。

+TDinsight installed via the `TDinsight.sh` script can be upgraded to the latest Grafana plugin and TDinsight Dashboard by re-running the script.

-手动安装的情况下,可按照上述步骤自行安装新的 Grafana 插件和 Dashboard。

+In the case of a manual installation, follow the steps above to install the new Grafana plugin and Dashboard yourself.

-## 卸载

+## Uninstall

-通过 `TDinsight.sh` 脚本安装的 TDinsight,可以使用命令行 `TDinsight.sh -R` 清理相关资源。

+For TDinsight installed via the `TDinsight.sh` script, the associated resources can be cleaned up with `TDinsight.sh -R`.

-手动安装时,要完全卸载 TDinsight,需要清理以下内容:

+For a manual installation, completely uninstalling TDinsight requires cleaning up the following:

-1. Grafana 中的 TDinsight Dashboard。
-2. Grafana 中的 Data Source 数据源。
-3. 从插件安装目录删除 `tdengine-datasource` 插件。

+1. The TDinsight Dashboard in Grafana.
+2. The Data Source in Grafana.
+3. The `tdengine-datasource` plugin in the plugin installation directory.

-## 整合的 Docker 示例

+## Integrated Docker Example

```bash
git clone --depth 1 https://github.com/taosdata/grafanaplugin.git
cd grafanaplugin
```

-根据需要在 `docker-compose.yml` 文件中修改:

+Modify the `docker-compose.yml` file as needed:

```yaml
-version: "3.7"
+version: '3.7'
services:
  grafana:
    image: grafana/grafana:7.5.10
    volumes:
      - ./dist:/var/lib/grafana/plugins/tdengine-datasource
      - ./grafana/grafana.ini:/etc/grafana/grafana.ini
      - ./grafana/provisioning/:/etc/grafana/provisioning/
      - grafana-data:/var/lib/grafana
    environment:
      TDENGINE_API: ${TDENGINE_API}
@@ -406,7 +407,7 @@ services:
      SMS_ACCESS_KEY_SECRET: ${SMS_ACCESS_KEY_SECRET}
      SMS_SIGN_NAME: ${SMS_SIGN_NAME}
      SMS_TEMPLATE_CODE: ${SMS_TEMPLATE_CODE}
-      SMS_TEMPLATE_PARAM: "${SMS_TEMPLATE_PARAM}"
+      SMS_TEMPLATE_PARAM: '${SMS_TEMPLATE_PARAM}'
      SMS_PHONE_NUMBERS: $SMS_PHONE_NUMBERS
      SMS_LISTEN_ADDR: ${SMS_LISTEN_ADDR}
    ports:
@@ -415,13 +416,13 @@ volumes:
  grafana-data:
```

-替换`docker-compose.yml`中的环境变量或保存环境变量到`.env`文件,然后用`docker-compose up`启动 Grafana。`docker-compose` 工具的具体用法参见 [Docker Compose Reference](https://docs.docker.com/compose/)

+Replace the environment variables in `docker-compose.yml`, or save the environment variables to the `.env` file, then start Grafana with `docker-compose up`. For details on the `docker-compose` tool, see the [Docker Compose Reference](https://docs.docker.com/compose/).

```bash
docker-compose up -d
```

-TDinsight 已经通过 Provisioning 部署完毕,请到 http://localhost:3000/d/tdinsight/ 查看仪表盘。

+TDinsight is now deployed via provisioning. Go to http://localhost:3000/d/tdinsight/ to view the dashboard.
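As a minimal sketch of such a `.env` file (not part of the patch: only the variables visible in the compose snippet above are shown, and every value is an illustrative placeholder to replace with your own):

```bash
# .env — read by docker-compose for variable substitution; all values below are placeholders
TDENGINE_API=http://tdengine:6041
SMS_ACCESS_KEY_SECRET=your-access-key-secret
SMS_SIGN_NAME=your-sign-name
SMS_TEMPLATE_CODE=SMS_XX
SMS_TEMPLATE_PARAM='{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}'
SMS_PHONE_NUMBERS=189xxxxxxxx,132xxxxxxxx
SMS_LISTEN_ADDR=127.0.0.1:9100
```

Keeping secrets in `.env` rather than inline in `docker-compose.yml` avoids committing credentials to version control.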
[grafana]: https://grafana.com
-[tdengine]: https://www.taosdata.com
+[tdengine]: https://tdengine.com

diff --git a/docs-en/14-reference/08-taos-shell.md b/docs-en/14-reference/08-taos-shell.md
index b3a88f3d5a6124b830847375412faf896cefd12c..002b515093258152e85dd9d7437e424dfa98c874 100644
--- a/docs-en/14-reference/08-taos-shell.md
+++ b/docs-en/14-reference/08-taos-shell.md
@@ -1,14 +1,14 @@
---
-title: TDengine Command Line (CLI)
-sidebar_label: TDengine CLI
+title: TDengine Command Line Interface (CLI)
+sidebar_label: Command Line Interface
description: Instructions and tips for using the TDengine CLI
---

-The TDengine command-line application (hereafter referred to as `TDengine CLI`) is the most feasility way for users to manipulate and interact with TDengine instances.
+The TDengine command-line interface (hereafter referred to as `TDengine CLI`) is the simplest way for users to manipulate and interact with TDengine instances.

## Installation

-If executed on the TDengine server-side, there is no need for additional installation steps to install TDengine CLI as it is already included and installed automatically. To run TDengine CLI on the environemtn which no TDengine server running, the TDengine client installation package needs to be installed first. For details, please refer to [connector](/reference/connector/).
+If executed on the TDengine server-side, there is no need for additional installation steps to install TDengine CLI as it is already included and installed automatically. To run TDengine CLI in an environment where no TDengine server is running, the TDengine client installation package needs to be installed first. For details, please refer to [connector](/reference/connector/).

## Execution

@@ -62,13 +62,13 @@ And many more parameters.

- -f, --file=FILE: Execute the SQL script file in non-interactive mode
- -k, --check=CHECK: Specify the table to be checked
- -l, --pktlen=PKTLEN: Test package size to be used for network testing
-- -n, --netrole=NETROLE: test scope for network connection test, default is `startup`, The value can be `client`, `server`, `rpc`, `startup`, `sync`, `speed`, or `fqdn`.
+- -n, --netrole=NETROLE: test scope for network connection test, default is `startup`. The value can be `client`, `server`, `rpc`, `startup`, `sync`, `speed`, or `fqdn`.
- -r, --raw-time: output the timestamp format as unsigned 64-bits integer (uint64_t in C language)
- -s, --commands=COMMAND: execute SQL commands in non-interactive mode
-- -S, --pkttype=PKTTYPE: Specify the packet type used for network testing. The default is TCP. can be specified as either TCP or UDP when `speed` is specified to netrole parameter
+- -S, --pkttype=PKTTYPE: Specify the packet type used for network testing. The default is TCP; it can be specified as either TCP or UDP when the `netrole` parameter is set to `speed`.
- -T, --thread=THREADNUM: The number of threads to import data in multi-threaded mode
- -s, --commands: Run TDengine CLI commands without entering the terminal
-- -z, --timezone=TIMEZONE: Specify time zone. Default is the value of current configruation file
+- -z, --timezone=TIMEZONE: Specify time zone. Default is the value in the current configuration file
- -V, --version: Print out the current version number

Example.
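The hunk above ends just before the file's own example. As a hedged illustration of the flags documented in it (a sketch, not taken from the patch — adjust the SQL and options to your environment):

```bash
# Execute a SQL command without entering the interactive shell (-s)
taos -s "show databases;"

# Run a client-side network connectivity test (-n) with a custom packet size (-l)
taos -n client -l 1024
```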
diff --git a/docs-en/14-reference/11-docker/index.md b/docs-en/14-reference/11-docker/index.md index 4ca84be369e14b3223e8609e06c9ebc4e35eaa2d..b7e60ab3e7f04a6078950977a563382a3524ebaa 100644 --- a/docs-en/14-reference/11-docker/index.md +++ b/docs-en/14-reference/11-docker/index.md @@ -13,7 +13,7 @@ The TDengine image starts with the HTTP service activated by default, using the docker run -d --name tdengine -p 6041:6041 tdengine/tdengine ``` -The above command starts a container named "tdengine" and maps the HTTP service end 6041 to the host port 6041. You can verify that the HTTP service provided in this container is available using the following command. +The above command starts a container named "tdengine" and maps the HTTP service port 6041 to the host port 6041. You can verify that the HTTP service provided in this container is available using the following command. ```shell curl -u root:taosdata -d "show databases" localhost:6041/rest/sql @@ -34,7 +34,7 @@ taos> show databases; Query OK, 1 row(s) in set (0.002843s) ``` -The TDengine server running in the container uses the container's hostname to establish a connection. Using TDengine CLI or various connectors (such as JDBC-JNI) to access the TDengine inside the container from outside the container is more complicated. So the above is the simplest way to access the TDengine service in the container and is suitable for some simple scenarios. Please refer to the next section if you want to access the TDengine service in the container from containerized using TDengine CLI or various connectors in some complex scenarios. +The TDengine server running in the container uses the container's hostname to establish a connection. Using TDengine CLI or various connectors (such as JDBC-JNI) to access the TDengine inside the container from outside the container is more complicated. So the above is the simplest way to access the TDengine service in the container and is suitable for some simple scenarios. Please refer to the next section if you want to access the TDengine service in the container from outside the container using TDengine CLI or various connectors for complex scenarios. ## Start TDengine on the host network @@ -42,7 +42,7 @@ The TDengine server running in the container uses the container's hostname to es docker run -d --name tdengine --network host tdengine/tdengine ``` -The above command starts TDengine on the host network and uses the host's FQDN to establish a connection instead of the container's hostname. It works too, like using `systemctl` to start TDengine on the host. If the TDengine client is already installed on the host, you can access it directly with the following command. +The above command starts TDengine on the host network and uses the host's FQDN to establish a connection instead of the container's hostname. It is the equivalent of using `systemctl` to start TDengine on the host. If the TDengine client is already installed on the host, you can access it directly with the following command. ```shell $ taos @@ -315,13 +315,13 @@ password: taosdata taoslog-td2: ``` - :::note +:::note - The `VERSION` environment variable is used to set the tdengine image tag - `TAOS_FIRST_EP` must be set on the newly created instance so that it can join the TDengine cluster; if there is a high availability requirement, `TAOS_SECOND_EP` needs to be used at the same time - `TAOS_REPLICA` is used to set the default number of database replicas. 
Its value range is [1,3]
- We recommend setting with `TAOS_ARBITRATOR` to use arbitrator in a two-nodes environment.
- :::
-
+ We recommend setting it with `TAOS_ARBITRATOR` to use an arbitrator in a two-node environment.
+
+ :::

2. Start the cluster

@@ -382,7 +382,7 @@ password: taosdata

Suppose you want to deploy multiple taosAdapters to improve throughput and provide high availability. In that case, the recommended configuration method uses a reverse proxy such as Nginx to offer a unified access entry. For specific configuration methods, please refer to the official documentation of Nginx. Here is an example:

```docker
- ersion: "3"
+ version: "3"

 networks:
   inter:

diff --git a/docs-en/14-reference/12-config/index.md b/docs-en/14-reference/12-config/index.md
index d420d64e97bdde616a57a6a312d21dd6d4e946ff..8ad9a474a02c5cc52559ccdc5910ad9d7b6264ae 100644
--- a/docs-en/14-reference/12-config/index.md
+++ b/docs-en/14-reference/12-config/index.md
@@ -65,7 +65,7 @@ taos --dump-config
| ------------- | ------------------------------------------------------------------------ |
| Applicable | Server Only |
| Meaning | The FQDN of the host where `taosd` will be started. It can be IP address |
-| Default Value | The first hostname configured for the hos |
+| Default Value | The first hostname configured for the host |
| Note | It should be within 96 bytes |

### serverPort

@@ -73,12 +73,12 @@
| Attribute | Description |
| ------------- | ------------------------------------------------------------------------------------------------------------------------------- |
| Applicable | Server Only |
-| Meaning | The port for external access after `taosd` is started 号 |
+| Meaning | The port for external access after `taosd` is started |
| Default Value | 6030 |
| Note | REST service is provided by `taosd` before 2.4.0.0 but by `taosAdapter` after 2.4.0.0, the default port of REST service is 6041 |

:::note
-TDengine uses continuous 13 ports, both TCP and TCP, from the port specified by `serverPort`. These ports need to be kept as open if firewall is enabled. Below table describes the ports used by TDengine in details.
+TDengine uses 13 continuous ports, both TCP and UDP, starting with the port specified by `serverPort`. You should ensure, in your firewall rules, that these ports are kept open. The table below describes the ports used by TDengine in detail.
:::

@@ -90,8 +90,8 @@ TDengine uses continuous 13 ports, both TCP and TCP, from the port specified by
| TCP | 6041 | REST connection between client and server | Prior to 2.4.0.0: serverPort+11; After 2.4.0.0 refer to [taosAdapter](/reference/taosadapter/) |
| TCP | 6042 | Service Port of Arbitrator | The parameter of Arbitrator |
| TCP | 6043 | Service Port of TaosKeeper | The parameter of TaosKeeper |
-| TCP | 6044 | Data access port for StatsD | efer to [taosAdapter](/reference/taosadapter/) |
-| UDP | 6045 | Data access for statsd | efer to [taosAdapter](/reference/taosadapter/) |
+| TCP | 6044 | Data access port for StatsD | refer to [taosAdapter](/reference/taosadapter/) |
+| UDP | 6045 | Data access for statsd | refer to [taosAdapter](/reference/taosadapter/) |
| TCP | 6060 | Port of Monitoring Service in Enterprise version | |
| UDP | 6030-6034 | Communication between client and server | serverPort |
| UDP | 6035-6039 | Communication among server nodes in cluster | serverPort |

@@ -120,7 +120,7 @@ TDengine uses continuous 13 ports, both TCP and TCP, from the port specified by
| Attribute | Description |
| ------------- | ------------------------------------------------------------------- |
| Applicable | Server and Client |
-| Meaning | TCP is used forcely |
+| Meaning | TCP is forcibly used |
| Value Range | 0: disabled 1: enabled |
| Default Value | 0 |
| Note | It's suggested to configure to enable if network is not good enough |

@@ -133,7 +133,7 @@ TDengine uses continuous 13 ports, both TCP and TCP, from the port specified by
| ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| Applicable | Server Only |
| Meaning | The switch for monitoring inside server. The workload of the hosts, including CPU, memory, disk, network, HTTP requests, are collected and stored in a system builtin database `LOG` |
-| Value Range | 0: monitoring disabled, 1: monitoring enabled 务. |
+| Value Range | 0: monitoring disabled, 1: monitoring enabled |
| Default Value | 0 |

### monitorInterval

@@ -159,13 +159,13 @@ TDengine uses continuous 13 ports, both TCP and TCP, from the port specified by

### queryBufferSize

-| Attribute | Description |
-| ------------- | --------------------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The total memory size reserved for all queries |
-| Unit | MB |
-| Default Value | 无 |
-| Note | It can be estimated by "maximum number of concurrent quries" _ "number of tables" _ 170 |
+| Attribute | Description |
+| ------------- | ---------------------------------------------------------------------------------------- |
+| Applicable | Server Only |
+| Meaning | The total memory size reserved for all queries |
+| Unit | MB |
+| Default Value | None |
+| Note | It can be estimated by "maximum number of concurrent queries" _ "number of tables" _ 170 |

### ratioOfQueryCores

@@ -182,8 +182,8 @@
| ------------- | -------------------------------------------- |
| Applicable | Server Only |
| Meaning | The maximum number of distinct rows returned |
-| Value Range | [100,000 - 100, 000, 000] |
-| Default Value | 100, 000 |
+| Value Range | [100,000 - 100,000,000] |
+| Default Value | 100,000 |
| Note | After version 2.3.0.0 |

## Locale Parameters

@@ -197,19 +197,19 @@
| Default Value | TimeZone configured in the host |

:::info
-To handle the data insertion and data query from multiple timezones, Unix Timestamp is used and stored TDengie. The timestamp generated from any timezones at same time is same in Unix timestamp. To make sure the time on client side can be converted to Unix timestamp correctly, the timezone must be set properly.
+To handle data insertion and data queries from multiple timezones, Unix timestamps are used and stored in TDengine. Timestamps generated at the same moment in any timezone are identical as Unix timestamps. To make sure the time on the client side can be converted to a Unix timestamp correctly, the timezone must be set properly.

On Linux system, TDengine clients automatically obtain timezone from the host. Alternatively, the timezone can be configured explicitly in configuration file `taos.cfg` like below.

```
timezone UTC-8
timezone GMT-8
timezone Asia/Shanghai
```

The above examples are all proper configuration for the timezone of UTC+8. On Windows system, however, `timezone Asia/Shanghai` is not supported, it must be set as `timezone UTC-8`.

-The setting for timezone impacts the strings not in Unix timestamp, keywords or functions related to date/time, for example
+The setting for timezone impacts strings that are not in Unix timestamp format and keywords or functions related to date/time. For example:

```sql
SELECT count(*) FROM table_name WHERE TS<'2019-04-11 12:01:08';
@@ -227,7 +227,7 @@ If the timezone is UTC, it's equal to
SELECT count(*) FROM table_name WHERE TS<1554984068000;
```

-To avoid the problems of using time strings, Unix timestamp can be used directly. Furthermore, time strings with timezone can be used in SQL statement, for example "2013-04-12T15:52:01.123+08:00" in RFC3339 format or "2013-04-12T15:52:01.123+0800" in ISO-8601 format, they are not influenced by timezone setting when converted to Unix timestamp.
+To avoid the problems of using time strings, Unix timestamp can be used directly. Furthermore, time strings with timezone can be used in SQL statements. For example "2013-04-12T15:52:01.123+08:00" in RFC3339 format or "2013-04-12T15:52:01.123+0800" in ISO-8601 format are not influenced by timezone setting when converted to Unix timestamp.

:::

@@ -240,21 +240,21 @@ To avoid the problems of using time strings, Unix timestamp can be used directly
| Default Value | Locale configured in host |

:::info
-A specific type "nchar" is provied in TDengine to store non-ASCII characters such as Chinese, Japanese, Korean. The characters to be stored in nchar type are firstly encoded in UCS4-LE before sending to server side. To store non-ASCII characters correctly, the encoding format of the client side needs to be set properly.
+A specific type "nchar" is provided in TDengine to store non-ASCII characters such as Chinese, Japanese, and Korean. The characters to be stored in nchar type are first encoded in UCS4-LE before being sent to the server side. To store non-ASCII characters correctly, the encoding format of the client side needs to be set properly.

The characters input on the client side are encoded using the default system encoding, which is UTF-8 on Linux, or GB18030 or GBK on some systems in Chinese, POSIX in docker, CP936 on Windows in Chinese. The encoding of the operating system in use must be set correctly so that the characters in nchar type can be converted to UCS4-LE.

-The locale definition standard on Linux is: \_., for example, in "zh_CN.UTF-8", "zh" means Chinese, "CN" means China mainland, "UTF-8" means charset. On Linux andMac OSX, the charset can be set by locale in the system. On Windows system another configuration parameter `charset` must be used to configure charset because the locale used on Windows is not POSIX standard. Of course, `charset` can also be used on Linux to specify the charset.
+The locale definition standard on Linux is `<language>_<region>.<charset>`; for example, in "zh_CN.UTF-8", "zh" means Chinese, "CN" means China mainland, and "UTF-8" means charset. On Linux and Mac OSX, the charset can be set by locale in the system. On Windows system another configuration parameter `charset` must be used to configure charset because the locale used on Windows is not POSIX standard. Of course, `charset` can also be used on Linux to specify the charset.

:::

### charset

-| Attribute | Description |
-| ------------- | ---------------------------- |
-| Applicable | Server and Client |
-| Meaning | Character |
-| Default Value | charset set in the system 系 |
+| Attribute | Description |
+| ------------- | ------------------------- |
+| Applicable | Server and Client |
+| Meaning | Character set |
+| Default Value | charset set in the system |

:::info
On Linux, if `charset` is not set in `taos.cfg`, when `taos` is started, the charset is obtained from system locale. If obtaining charset from system locale fails, `taos` would fail to start. So on Linux system, if system locale is set properly, it's not necessary to set `charset` in `taos.cfg`. For example:

```
locale zh_CN.UTF-8
```

-Besides, on Linux system, if the charset contained in `locale` is not consistent with that set by `charset`, the one who comes later in the configuration file is used.
+On a Linux system, if the charset contained in `locale` is not consistent with that set by `charset`, the later setting in the configuration file takes precedence.
```title="Effective charset is GBK" locale zh_CN.UTF-8 @@ -346,12 +346,12 @@ charset CP936 ### walLevel -| Attribute | Description | -| ------------- | ------------------------------------------------------------ | -| Applicable | Server Only | -| Meaning | WAL level | +| Attribute | Description | +| ------------- | ---------------------------------------------------------------------------------- | +| Applicable | Server Only | +| Meaning | WAL level | | Value Range | 0: wal disabled
1: wal enabled without fsync
2: wal enabled with fsync |
-| Default Value | 1 |
+| Default Value | 1 |

### fsync

@@ -430,12 +430,12 @@ charset CP936

### quorum

-| Attribute | Description |
-| ------------- | --------------------------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The number of required confirmations for data replication in case of multiple replications 多 |
-| Value Range | 1,2 |
-| Default Value | 1 |
+| Attribute | Description |
+| ------------- | ------------------------------------------------------------------------------------------ |
+| Applicable | Server Only |
+| Meaning | The number of required confirmations for data replication in case of multiple replications |
+| Value Range | 1,2 |
+| Default Value | 1 |

### role

@@ -552,7 +552,7 @@ charset CP936
| Meaning | The expiration time for dnode online status; if no status update is received from a dnode within this period, the dnode is marked offline |
| Unit | second |
| Value Range | 5-7200000 |
-| Default Value | 86400\*10(10 天) |
+| Default Value | 86400\*10 (i.e. 10 days) |

## Performance Optimization Parameters

@@ -569,7 +569,7 @@ charset CP936
| Attribute | Description |
| ------------- | --------------------------------------------------------------------------------------------- |
| Applicable | Server Only |
-| Meaning | Maximum number of query threads 量 |
+| Meaning | Maximum number of query threads |
| Value Range | 0: Only one query thread
1: Same as number of CPU cores
2: two times of CPU cores |
| Default Value | 1 |
| Note | This value can be a float number, 0.5 means half of the CPU cores |

@@ -700,7 +700,7 @@ charset CP936
| Default Value | 0.0000000000000001 |
| Note | The fractional part lower than this value will be discarded |

-## Continuous Query Prameters
+## Continuous Query Parameters

### stream

@@ -778,8 +778,8 @@ To prevent system resource from being exhausted by multiple concurrent streams,
## HTTP Parameters

:::note
-HTTP server had been provided by `taosd` prior to version 2.4.0.0, now is provided by `taosAdapter` after version 2.4.0.0.
-The parameters described in this section are only application in versions prior to 2.4.0.0. If you are using any version from 2.4.0.0, please refer to [taosAdapter]](/reference/taosadapter/).
+HTTP service was provided by `taosd` prior to version 2.4.0.0 and is provided by `taosAdapter` after version 2.4.0.0.
+The parameters described in this section are only applicable to versions prior to 2.4.0.0. If you are using any version from 2.4.0.0, please refer to [taosAdapter](/reference/taosadapter/).

:::

@@ -961,7 +961,7 @@ The parameters described in this section are only applicable to versions prior
| ------------- | ----------------------- |
| Applicable | Client Only |
| Meaning | Log level of jni module |
-| Value Range | 同上 |
+| Value Range | Same as debugFlag |
| Default Value | |

### odbcDebugFlag

@@ -1100,12 +1100,12 @@ If the length of value exceeds `maxBinaryDisplayWidth`, then the actual display
### maxRegexStringLen

-| Attribute | Description |
-| ------------- | ----------------------------------------------------------- |
-| Meaning | Maximum length of regular expression 正则表达式最大允许长度 |
-| Value Range | [128, 16384] |
-| Default Value | 128 |
-| Note | From version 2.3.0.0 |
+| Attribute | Description |
+| ------------- | ------------------------------------ |
+| Meaning | Maximum length of regular expression |
+| Value Range | [128, 16384] |
+| Default Value | 128 |
+| Note | From version 2.3.0.0 |

## Other Parameters

diff --git a/docs-en/14-reference/12-directory.md b/docs-en/14-reference/12-directory.md
index dbdba2b715bb41baf9b70dce91a3065e585d0434..304e3bcb434ee9a6ba338577a4d1ba546b548e3f 100644
--- a/docs-en/14-reference/12-directory.md
+++ b/docs-en/14-reference/12-directory.md
@@ -32,7 +32,7 @@ All executable files of TDengine are in the _/usr/local/taos/bin_ directory by d
- _taosd-dump-cfg.gdb_: script to facilitate debugging of taosd's gdb execution.

:::note
-taosdump after version 2.4.0.0 require taosTools as a standalone installation. A few version taosBenchmark is include in taosTools too.
+taosdump after version 2.4.0.0 requires taosTools as a standalone installation. A new version of taosBenchmark is included in taosTools too.
:::

:::tip
diff --git a/docs-en/14-reference/13-schemaless/13-schemaless.md b/docs-en/14-reference/13-schemaless/13-schemaless.md
index d9ce9b434dd14a89d243b2ed629f3fde64e6aba0..acbbb1cd3c5a7c50e226644f2de9e0e77274c6dd 100644
--- a/docs-en/14-reference/13-schemaless/13-schemaless.md
+++ b/docs-en/14-reference/13-schemaless/13-schemaless.md
@@ -1,19 +1,19 @@
---
title: Schemaless Writing
-description: "The Schemaless write method eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data as it is written to the interface."
+description: "The Schemaless write method eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data, as it is written to the interface." --- -In IoT applications, many data items are often collected for intelligent control, business analysis, device monitoring, etc. Due to the version upgrade of the application logic, or the hardware adjustment of the device itself, the data collection items may change more frequently. To facilitate the data logging work in such cases, TDengine starting from version 2.2.0.0, it provides a series of interfaces to the schemaless writing method, which eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data as the data is written to the interface. And when necessary, Schemaless writing will automatically add the required columns to ensure that the data written by the user is stored correctly. +In IoT applications, data is collected for many purposes such as intelligent control, business analysis, device monitoring and so on. Due to changes in business or functional requirements or changes in device hardware, the application logic and even the data collected may change. To provide the flexibility needed in such cases and in a rapidly changing IoT landscape, TDengine starting from version 2.2.0.0, provides a series of interfaces for the schemaless writing method. These interfaces eliminate the need to create super tables and subtables in advance by automatically creating the storage structure corresponding to the data as the data is written to the interface. When necessary, schemaless writing will automatically add the required columns to ensure that the data written by the user is stored correctly. -The schemaless writing method creates super tables and their corresponding sub-tables completely indistinguishable from the super tables and sub-tables created directly via SQL. You can write data directly to them via SQL statements. Note that the names of tables created by schemaless writing are based on fixed mapping rules for tag values, so they are not explicitly ideographic and lack readability. +The schemaless writing method creates super tables and their corresponding subtables. These are completely indistinguishable from the super tables and subtables created directly via SQL. You can write data directly to them via SQL statements. Note that the names of tables created by schemaless writing are based on fixed mapping rules for tag values, so they are not explicitly ideographic and they lack readability. ## Schemaless Writing Line Protocol -TDengine's schemaless writing line protocol supports to be compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. However, when using these three protocols, you need to specify in the API the standard of the parsing protocol to be used for the input content. +TDengine's schemaless writing line protocol supports InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. However, when using these three protocols, you need to specify in the API the standard of the parsing protocol to be used for the input content. For the standard writing protocols of InfluxDB and OpenTSDB, please refer to the documentation of each protocol. The following is a description of TDengine's extended protocol, based on InfluxDB's line protocol first. 
These extensions allow users to control the (super table) schema at a finer granularity.

-With the following formatting conventions, Schemaless writing uses a single string to express a data row (multiple rows can be passed into the writing API at once to enable bulk writing).
+With the following formatting conventions, schemaless writing uses a single string to express a data row (multiple rows can be passed into the writing API at once to enable bulk writing).

```json
measurement,tag_set field_set timestamp
```

@@ -23,7 +23,7 @@ where:

- measurement will be used as the data table name. It will be separated from tag_set by a comma.
- tag_set will be used as tag data in the format `<tag_key>=<tag_value>,<tag_key>=<tag_value>`, i.e. multiple tags' data can be separated by a comma. It is separated from field_set by a space.
-- field_set will be used as normal column data in the format of `=,=`, again using a comma to separate multiple normal columns of data. It is separated from the timestamp by space.
+- field_set will be used as normal column data in the format of `<field_key>=<field_value>,<field_key>=<field_value>`, again using a comma to separate multiple normal columns of data. It is separated from the timestamp by a space.
- The timestamp is the primary key corresponding to the data in this row.

All data in tag_set is automatically converted to the NCHAR data type and does not require double quotes (").

@@ -32,7 +32,7 @@ In the schemaless writing data line protocol, each data item in the field_set ne
- If there are English double quotes on both sides, it indicates the BINARY(32) type. For example, `"abc"`.
- If there are double quotes on both sides and an L prefix, it means NCHAR(32) type. For example, `L"error message"`.
-- Spaces, equal signs (=), commas (,), and double quotes (") need to be escaped with a backslash (\) in front. (All refer to the ASCII character)
+- Spaces, equal signs (=), commas (,), and double quotes (") need to be escaped with a backslash (\\) in front. (All refer to the ASCII character)
- Numeric types will be distinguished from data types by the suffix.

| **Serial number** | **Postfix** | **Mapping type** | **Size (bytes)** |

@@ -58,26 +58,25 @@ Note that if the wrong case is used when describing the data type suffix, or if

Schemaless writes process row data according to the following principles.

-1. You can use the following rules to generate the sub-table names: first, combine the measurement name and the key and value of the label into the next string:
+1. You can use the following rules to generate the subtable names: first, combine the measurement name and the keys and values of the tags into the following string:

```json
"measurement,tag_key1=tag_value1,tag_key2=tag_value2"
```

Note that tag_key1, tag_key2 are not the original order of the tags entered by the user but the result of using the tag names in ascending order of the strings. Therefore, tag_key1 is not the first tag entered in the line protocol.

-The string's MD5 hash value "md5_val" is calculated after the ranking is completed. The calculation result is then combined with the string to generate the table name: "t_md5_val". "t*" is a fixed prefix that every table generated by this mapping relationship has. 2.
+The string's MD5 hash value "md5_val" is calculated after the ranking is completed. The calculation result is then combined with the fixed prefix to generate the table name: "t_md5_val". "t_" is a fixed prefix that every table generated by this mapping relationship has (a sketch of this procedure appears under "Data schema mapping rules" below). 2.
If the super table obtained by parsing the line protocol does not exist, this super table is created.
-If the subtable obtained by the parse line protocol does not exist, Schemaless creates the sub-table according to the subtable name determined in steps 1 or 2. 4.
+If the subtable obtained by parsing the line protocol does not exist, Schemaless creates the subtable according to the subtable name determined in steps 1 or 2. 4.
If the specified tag or regular column in the data row does not exist, the corresponding tag or regular column is added to the super table (only incremental). 5.
If there are some tag columns or regular columns in the super table that are not specified to take values in a data row, then the values of these columns are set to NULL. 6.
For BINARY or NCHAR columns, if the length of the value provided in a data row exceeds the column type limit, the maximum length of characters allowed to be stored in the column is automatically increased (only incremented and not decremented) to ensure complete preservation of the data.
-7. If the specified data subtable already exists, and the specified tag column takes a value different from the saved value this time, the value in the latest data row overwrites the old tag column take value.
+7. If the specified data subtable already exists, and the specified tag column takes a value different from the saved value this time, the value in the latest data row overwrites the old value of the tag column.
8. Errors encountered throughout the processing will interrupt the writing process and return an error code.

:::tip
-All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed
-16k bytes. See [TAOS SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
+All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data, which cannot exceed 48k bytes. See [TAOS SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
:::

## Time resolution recognition

@@ -87,7 +86,7 @@ Three specified modes are supported in the schemaless writing process, as follow
| **Serial** | **Value** | **Description** |
| -------- | ------------------- | ------------------------------- |
| 1 | SML_LINE_PROTOCOL | InfluxDB Line Protocol |
-| 2 | SML_TELNET_PROTOCOL | OpenTSDB Text Line Protocol | | 2 | SML_TELNET_PROTOCOL | OpenTSDB Text Line Protocol
+| 2 | SML_TELNET_PROTOCOL | OpenTSDB Text Line Protocol |
| 3 | SML_JSON_PROTOCOL | JSON protocol format |

In the SML_LINE_PROTOCOL parsing mode, the user is required to specify the time resolution of the input timestamp. The available time resolutions are shown in the following table.

@@ -106,8 +105,11 @@ In SML_TELNET_PROTOCOL and SML_JSON_PROTOCOL modes, the time precision is determ
## Data schema mapping rules

-This section describes how data for line protocols are mapped to data with a schema. The data measurement in each line protocol is mapped to
-The tag name in tag_set is the name of the tag in the data schema, and the name in field_set is the column's name. The following data is used as an example to illustrate the mapping rules.
+This section describes how data for line protocols are mapped to data with a schema. The data measurement in each line protocol is mapped as follows:
+- The tag name in tag_set is the name of the tag in the data schema
+- The name in field_set is the column's name.
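+As referenced under naming rule 1 of the processing principles above, here is a minimal Python sketch of the subtable-name derivation: sort the tags by key, join them with the measurement name, take the MD5 hash, and prepend the fixed prefix. It is one plausible reading of the documented rule, not TDengine's actual implementation.
+
+```python
+import hashlib
+
+def subtable_name(measurement: str, tags: dict) -> str:
+    # Rule 1: tag keys are used in ascending string order, not input order.
+    tag_part = ",".join(f"{k}={v}" for k, v in sorted(tags.items()))
+    combined = f"{measurement},{tag_part}"
+    # The fixed prefix "t_" is followed by the MD5 hash of the combined string.
+    return "t_" + hashlib.md5(combined.encode("utf-8")).hexdigest()
+
+# Tags taken from the example line used below: st,t1=3,t2=4,t3=t3 ...
+print(subtable_name("st", {"t1": "3", "t2": "4", "t3": "t3"}))
+```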
+
+The following data is used as an example to illustrate the mapping rules.

```json
st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
@@ -139,7 +141,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c5="pass" 1626006833639000000
st,t1=3,t2=4,t3=t3 c1=3i64,c5="passit" 1626006833640000000
```

-The first line of the line protocol parsing will declare column c5 is a BINARY(4) field, the second line data write will extract column c5 is still a BINARY column. Still, its width is 6, then you need to increase the width of the BINARY field to be able to accommodate the new string.
+The first line of the line protocol parsing will declare column c5 as a BINARY(4) field. The second line will again parse column c5 as a BINARY column, but its width is now 6, so the width of the BINARY field is increased to accommodate the new string.

```json
st,t1=3,t2=4,t3=t3 c1=3i64 1626006833639000000
diff --git a/docs-en/14-reference/_collectd.mdx b/docs-en/14-reference/_collectd.mdx
index 1f57d883eec9feadc3cc460bf968b0dd43fedfe8..ce88328098a181de48dcaa080ef45f228b20bf1c 100644
--- a/docs-en/14-reference/_collectd.mdx
+++ b/docs-en/14-reference/_collectd.mdx
@@ -25,7 +25,7 @@ The default database name written by taosAdapter is `collectd`. You can also mod
#collectd
collectd uses a plugin mechanism to write the collected monitoring data to different data storage software in various forms. TDengine supports both direct collection plugins and write_tsdb plugins.

-#### is configured to receive data from the direct collection plugin
+#### Configure the direct collection plugin

Modify the relevant configuration items in the collectd configuration file (default location /etc/collectd/collectd.conf).

@@ -62,7 +62,7 @@ LoadPlugin write_tsdb
```

-Where fills in the server's domain name or IP address running taosAdapter. Fill in the data that taosAdapter uses to receive the collectd write_tsdb plugin (default is 6047).
+Where `<host>` is the domain name or IP address of the server running taosAdapter, and `<port>` is the port that taosAdapter uses to receive collectd write_tsdb plugin data (default is 6047).

```text
LoadPlugin write_tsdb
diff --git a/docs-en/14-reference/_tcollector.mdx b/docs-en/14-reference/_tcollector.mdx
index 85794d54007b70acf205b1bbc897cec1d0c4f824..42b021410e3862c4fa328d8dae40dcac1456e929 100644
--- a/docs-en/14-reference/_tcollector.mdx
+++ b/docs-en/14-reference/_tcollector.mdx
@@ -17,7 +17,7 @@ password = "taosdata"
...
```

-The taosAdapter writes to the database with the default name `tcollector`. You can also modify the taosAdapter configuration file dbs entry to specify a different name. user and password fill in the actual TDengine configuration values. After changing the configuration file, you need to restart the taosAdapter.
+The taosAdapter writes to the database with the default name `tcollector`. You can also modify the taosAdapter configuration file dbs entry to specify a different name. Fill in the actual user and password for TDengine. After changing the configuration file, you need to restart the taosAdapter.

- You can also enable taosAdapter to receive tcollector data by using the taosAdapter command-line parameters or setting environment variables.

@@ -25,7 +25,7 @@ The taosAdapter writes to the database with the default name `tcollector`. You c
To use TCollector, you need to download its [source code](https://github.com/OpenTSDB/tcollector). Its configuration items are in its source code.
Note: TCollector differs significantly from version to version, so here is an example of the latest code for the current master branch (git commit: 37ae920). -Modify the contents of the `collectors/etc/config.py` and `tcollector.py` files. Change the address of the OpenTSDB host to the domain name or IP address of the server where taosAdapter is deployed, and change the port to the port that taosAdapter supports TCollector on (default is 6049). +Modify the contents of the `collectors/etc/config.py` and `tcollector.py` files. Change the address of the OpenTSDB host to the domain name or IP address of the server where taosAdapter is deployed, and change the port to the port on which taosAdapter supports TCollector (default is 6049). Example of git diff output of source code changes. diff --git a/docs-en/14-reference/index.md b/docs-en/14-reference/index.md index 89f675902d01ba2d2c1b322408c372429d6bda1c..f350eebfc1a1ca2feaedc18c4b4fa798742e31b4 100644 --- a/docs-en/14-reference/index.md +++ b/docs-en/14-reference/index.md @@ -2,11 +2,11 @@ title: Reference --- -The reference guide is the detailed introduction to TDengine, various TDengine's connectors in different languages, and the tools that come with it. +The reference guide is a detailed introduction to TDengine including various TDengine connectors in different languages, and the tools that come with TDengine. ```mdx-code-block import DocCardList from '@theme/DocCardList'; import {useCurrentSidebarCategory} from '@docusaurus/theme-common'; -``` \ No newline at end of file +``` diff --git a/docs-en/14-reference/taosAdapter-architecture.png b/docs-en/14-reference/taosAdapter-architecture.png deleted file mode 100644 index 08a9018553aae6f86b42d127b372d0cecfa9bdf8..0000000000000000000000000000000000000000 Binary files a/docs-en/14-reference/taosAdapter-architecture.png and /dev/null differ diff --git a/docs-en/14-reference/taosAdapter-architecture.webp b/docs-en/14-reference/taosAdapter-architecture.webp new file mode 100644 index 0000000000000000000000000000000000000000..a4162b0a037c06d34191784716c51080b9f8a570 Binary files /dev/null and b/docs-en/14-reference/taosAdapter-architecture.webp differ diff --git a/docs-en/20-third-party/01-grafana.mdx b/docs-en/20-third-party/01-grafana.mdx index c1bfd4a96a4576df8570d8b480d5c2afe47e20b8..b3cab6271001feb56714f808906cb78ba1098593 100644 --- a/docs-en/20-third-party/01-grafana.mdx +++ b/docs-en/20-third-party/01-grafana.mdx @@ -3,13 +3,13 @@ sidebar_label: Grafana title: Grafana --- -TDengine can be quickly integrated with the open-source data visualization system [Grafana](https://www.grafana.com/) to build a data monitoring and alerting system. The whole process does not require any code development. And you can visualize the contents of the data tables in TDengine on a DashBoard. +TDengine can be quickly integrated with the open-source data visualization system [Grafana](https://www.grafana.com/) to build a data monitoring and alerting system. The whole process does not require any code development. And you can visualize the contents of the data tables in TDengine on a dashboard. You can learn more about using the TDengine plugin on [GitHub](https://github.com/taosdata/grafanaplugin/blob/master/README.md). ## Prerequisites -In order for Grafana to add the TDengine data source successfully, the following preparations are required: +In order for Grafana to add the TDengine data source successfully, the following preparation is required: 1. 
The TDengine cluster is deployed and functioning properly 2. taosAdapter is installed and running properly. Please refer to the taosAdapter manual for details. @@ -23,7 +23,7 @@ You can download The Grafana plugin for TDengine from Data Sources` on the left side, as shown in the following figure. -![img](./grafana/add_datasource1.jpg) +![TDengine Database TDinsight plugin add datasource 1](./grafana/add_datasource1.webp) Click `Add data source` to enter the Add data source page, and enter TDengine in the query box to add it, as shown in the following figure. -![img](./grafana/add_datasource2.jpg) +![TDengine Database TDinsight plugin add datasource 2](./grafana/add_datasource2.webp) Enter the datasource configuration page, and follow the default prompts to modify the corresponding configuration. -![img](./grafana/add_datasource3.jpg) +![TDengine Database TDinsight plugin add database 3](./grafana/add_datasource3.webp) - Host: IP address of the server where the components of the TDengine cluster provide REST service (offered by taosd before 2.4 and by taosAdapter since 2.4) and the port number of the TDengine REST service (6041), by default use `http://localhost:6041`. - User: TDengine user name. - Password: TDengine user password. -Click `Save & Test` to test. Follows are a success. +Click `Save & Test` to test. You should see a success message if the test worked. -![img](./grafana/add_datasource4.jpg) +![TDengine Database TDinsight plugin add database 4](./grafana/add_datasource4.webp) ### Create Dashboard -Go back to the main interface to create the Dashboard, click Add Query to enter the panel query page: +Go back to the main interface to create a dashboard and click Add Query to enter the panel query page: -![img](./grafana/create_dashboard1.jpg) +![TDengine Database TDinsight plugin create dashboard 1](./grafana/create_dashboard1.webp) As shown above, select the `TDengine` data source in the `Query` and enter the corresponding SQL in the query box below for query. -- INPUT SQL: enter the statement to be queried (the result set of the SQL statement should be two columns and multiple rows), for example: `select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)`, where, from, to and interval are built-in variables of the TDengine plugin, indicating the range and time interval of queries fetched from the Grafana plugin panel. In addition to the built-in variables, ` custom template variables are also supported. +- INPUT SQL: enter the statement to be queried (the result set of the SQL statement should be two columns and multiple rows), for example: `select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)`, where, from, to and interval are built-in variables of the TDengine plugin, indicating the range and time interval of queries fetched from the Grafana plugin panel. In addition to the built-in variables, custom template variables are also supported. - ALIAS BY: This allows you to set the current query alias. - GENERATE SQL: Clicking this button will automatically replace the corresponding variables and generate the final executed statement. Follow the default prompt to query the average system memory usage for the specified interval on the server where the current TDengine deployment is located as follows. 
-![img](./grafana/create_dashboard2.jpg)
+![TDengine Database TDinsight plugin create dashboard 2](./grafana/create_dashboard2.webp)

> For more information on how to use Grafana to create the appropriate monitoring interface and for more details on using Grafana, refer to the official Grafana [documentation](https://grafana.com/docs/).

diff --git a/docs-en/20-third-party/03-telegraf.md b/docs-en/20-third-party/03-telegraf.md
index 0d563c9ff36268ac27e18e21fefed789789dc1a7..6a7aac322f9def880f58d7ed0adcc4a8f3687ed1 100644
--- a/docs-en/20-third-party/03-telegraf.md
+++ b/docs-en/20-third-party/03-telegraf.md
@@ -5,7 +5,7 @@ title: Telegraf writing

import Telegraf from "../14-reference/_telegraf.mdx"

-Telegraf is a viral metrics collection open-source software. Telegraf can collect the operation information of various components without writing any scripts to collect regularly, reducing the difficulty of data acquisition.
+Telegraf is a viral, open-source metrics collection tool. Telegraf can collect the operation information of various components without having to write any scripts to collect it regularly, reducing the difficulty of data acquisition.

Telegraf's data can be written to TDengine by simply adding the output configuration of Telegraf to the URL corresponding to taosAdapter and modifying several configuration items. The presence of Telegraf data in TDengine can take advantage of TDengine's efficient storage query performance and clustering capabilities for time-series data.

diff --git a/docs-en/20-third-party/05-collectd.md b/docs-en/20-third-party/05-collectd.md
index 609e55842ab35cdc2d394663f5450f908e49f7f7..db62f2ecd1afb4936466ca0243a7e14ff294f8b6 100644
--- a/docs-en/20-third-party/05-collectd.md
+++ b/docs-en/20-third-party/05-collectd.md
@@ -6,7 +6,7 @@ title: collectd writing

import CollectD from "../14-reference/_collectd.mdx"

-collectd is a daemon used to collect system performance metric data. collectd provides various storage mechanisms to store different values. It periodically counts system performance statistics number while the system is running and storing information. You can use this information to help identify current system performance bottlenecks and predict future system load.
+collectd is a daemon used to collect system performance metric data. collectd provides various storage mechanisms to store different values. It periodically collects system performance statistics while the system is running, and stores the information. You can use this information to help identify current system performance bottlenecks and predict future system load.

You can write the data collected by collectd to TDengine by simply modifying the configuration of collectd to the domain name (or IP address) and corresponding port of the server running taosAdapter. It can take full advantage of TDengine's efficient storage query performance and clustering capability for time-series data.

diff --git a/docs-en/20-third-party/06-statsd.md b/docs-en/20-third-party/06-statsd.md
index bf4b6c7ab5dac4114cad0d650b2aeb026a67581c..40e927b9fd1d2eca9d454a987ac51d533eb75005 100644
--- a/docs-en/20-third-party/06-statsd.md
+++ b/docs-en/20-third-party/06-statsd.md
@@ -7,7 +7,7 @@ import StatsD from "../14-reference/_statsd.mdx"

StatsD is a simple daemon for aggregating application metrics, which has evolved rapidly in recent years into a unified protocol for collecting application performance metrics.
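+For readers who have not used StatsD before, the sketch below sends one counter metric in StatsD's plain-text format (`<name>:<value>|<type>`) over UDP. The metric name is invented for the example, and 8125 is StatsD's conventional default port.
+
+```python
+import socket
+
+# One counter increment in StatsD's plain-text format: <name>:<value>|<type>.
+payload = b"example.app.requests:1|c"  # hypothetical metric name
+
+# StatsD listens on UDP; 8125 is the conventional default port.
+sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+sock.sendto(payload, ("127.0.0.1", 8125))
+sock.close()
+```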
-You can write StatsD data to TDengine by simply modifying in the configuration file of StatsD with the domain name (or IP address) of the server running taosAdapter and the corresponding port. It can take full advantage of TDengine's efficient storage query performance and clustering capabilities for time-series data.
+You can write StatsD data to TDengine by simply modifying the configuration file of StatsD with the domain name (or IP address) of the server running taosAdapter and the corresponding port. It can take full advantage of TDengine's efficient storage query performance and clustering capabilities for time-series data.

## Prerequisites

diff --git a/docs-en/20-third-party/07-icinga2.md b/docs-en/20-third-party/07-icinga2.md
index ba9cde8cea7504ac9df871d5f6aa42cc5c94d895..b27196dfe313b468eeb73ff4b114d9d955618c3e 100644
--- a/docs-en/20-third-party/07-icinga2.md
+++ b/docs-en/20-third-party/07-icinga2.md
@@ -5,7 +5,7 @@ title: icinga2 writing

import Icinga2 from "../14-reference/_icinga2.mdx"

-icinga2 is an open-source software monitoring host and network initially developed from the Nagios network monitoring application. Currently, icinga2 is distributed under the GNU GPL v2 license.
+icinga2 is an open-source, host and network monitoring software initially developed from the Nagios network monitoring application. Currently, icinga2 is distributed under the GNU GPL v2 license.

You can write the data collected by icinga2 to TDengine by simply modifying the icinga2 configuration to point to the taosAdapter server and the corresponding port, taking advantage of TDengine's efficient storage and query performance and clustering capabilities for time-series data.

diff --git a/docs-en/20-third-party/09-emq-broker.md b/docs-en/20-third-party/09-emq-broker.md
index 13562ba7f720499c23771437c5c6ba0f61819456..d3eafebc14e8ddc29b03abf8785a6c0a013ef014 100644
--- a/docs-en/20-third-party/09-emq-broker.md
+++ b/docs-en/20-third-party/09-emq-broker.md
@@ -3,7 +3,7 @@ sidebar_label: EMQX Broker
title: EMQX Broker writing
---

-MQTT is a popular IoT data transfer protocol, [EMQX](https://github.com/emqx/emqx) is an open-source MQTT Broker software, without any code, only need to use "rules" in EMQX Dashboard to do simple configuration. You can write MQTT data directly to TDengine. EMQX supports saving data to TDengine by sending it to web services and provides a native TDengine driver for direct saving in the Enterprise Edition. Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use it. tdengine).
+MQTT is a popular IoT data transfer protocol. [EMQX](https://github.com/emqx/emqx) is an open-source MQTT Broker software. You can write MQTT data directly to TDengine without any code; you only need to set up "rules" in EMQX Dashboard to create a simple configuration. EMQX supports saving data to TDengine by sending data to a web service and provides a native TDengine driver for direct saving in the Enterprise Edition. Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use it.

## Prerequisites

@@ -44,25 +44,25 @@ Since the configuration interface of EMQX differs from version to version, here
Use your browser to open the URL `http://IP:18083` and log in to EMQX Dashboard. The initial installation username is `admin` and the password is: `public`.
-![img](./emqx/login-dashboard.png)
+![TDengine Database EMQX login dashboard](./emqx/login-dashboard.webp)

### Creating Rule

Select "Rule" in the "Rule Engine" on the left and click the "Create" button:

-![img](./emqx/rule-engine.png)
+![TDengine Database EMQX rule engine](./emqx/rule-engine.webp)

### Edit SQL fields

-![img](./emqx/create-rule.png)
+![TDengine Database EMQX create rule](./emqx/create-rule.webp)

### Add "action handler"

-![img](./emqx/add-action-handler.png)
+![TDengine Database EMQX add action handler](./emqx/add-action-handler.webp)

### Add "Resource"

-![img](./emqx/create-resource.png)
+![TDengine Database EMQX create resource](./emqx/create-resource.webp)

Select "Data to Web Service" and click the "New Resource" button.

@@ -70,13 +70,13 @@ Select "Data to Web Service" and click the "New Resource" button.

Select "Data to Web Service" and fill in the request URL as the address and port of the server running taosAdapter (default is 6041). Leave the other properties at their default values.

-![img](./emqx/edit-resource.png)
+![TDengine Database EMQX edit resource](./emqx/edit-resource.webp)

### Edit "action"

Edit the resource configuration to add the key/value pairing for Authorization. Please refer to the [TDengine REST API documentation](https://docs.taosdata.com/reference/rest-api/) for details on authorization. Enter the rule engine replacement template in the message body.

-![img](./emqx/edit-action.png)
+![TDengine Database EMQX edit action](./emqx/edit-action.webp)

## Compose program to mock data

@@ -163,7 +163,7 @@ Edit the resource configuration to add the key/value pairing for Authorization.

Note: `CLIENT_NUM` in the code can be set to a smaller value at the beginning of the test, in case the hardware is not capable of handling a larger number of concurrent clients.

-![img](./emqx/client-num.png)
+![TDengine Database EMQX client num](./emqx/client-num.webp)

## Execute tests to simulate sending MQTT data

```
npm install mqtt mockjs --save --registry=https://registry.npm.taobao.org
node mock.js
```

-![img](./emqx/run-mock.png)
+![TDengine Database EMQX run mock](./emqx/run-mock.webp)

## Verify that EMQX is receiving data

Refresh the EMQX Dashboard rules engine interface to see how many records were received correctly:

-![img](./emqx/check-rule-matched.png)
+![TDengine Database EMQX rule matched](./emqx/check-rule-matched.webp)

## Verify that data is written to TDengine

Use the TDengine CLI program to log in and query the appropriate databases and tables to verify that the data is being written to TDengine correctly:

-![img](./emqx/check-result-in-taos.png)
+![TDengine Database EMQX result in taos](./emqx/check-result-in-taos.webp)

Please refer to the [TDengine official documentation](https://docs.taosdata.com/) for more details on how to use TDengine. Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use EMQX.

diff --git a/docs-en/20-third-party/11-kafka.md b/docs-en/20-third-party/11-kafka.md
new file mode 100644
index 0000000000000000000000000000000000000000..6720af8bf81ea2f4fce415a54847453f578ababf
--- /dev/null
+++ b/docs-en/20-third-party/11-kafka.md
@@ -0,0 +1,439 @@
+---
+sidebar_label: Kafka
+title: TDengine Kafka Connector Tutorial
+---
+
+TDengine Kafka Connector contains two plugins: TDengine Source Connector and TDengine Sink Connector.
Users only need to provide a simple configuration file to synchronize the data of the specified topic in Kafka (batch or real-time) to TDengine, or synchronize the data (batch or real-time) of the specified database in TDengine to Kafka.
+
+## What is Kafka Connect?
+
+Kafka Connect is a component of [Apache Kafka](https://kafka.apache.org/) that enables other systems, such as databases, cloud services, file systems, etc., to connect to Kafka easily. Data can flow from other software to Kafka via Kafka Connect and from Kafka to other systems via Kafka Connect. Plugins that read data from other software are called Source Connectors, and plugins that write data to other software are called Sink Connectors. Neither Source Connector nor Sink Connector connects directly to the Kafka Broker: the Source Connector transfers data to Kafka Connect, and the Sink Connector receives data from Kafka Connect.
+
+![TDengine Database Kafka Connector -- Kafka Connect](kafka/Kafka_Connect.webp)
+
+TDengine Source Connector is used to read data from TDengine in real-time and send it to Kafka Connect. Users can use the TDengine Sink Connector to receive data from Kafka Connect and write it to TDengine.
+
+![TDengine Database Kafka Connector -- streaming integration with kafka connect](kafka/streaming-integration-with-kafka-connect.webp)
+
+## What is Confluent?
+
+[Confluent](https://www.confluent.io/) adds many extensions to Kafka. These include:
+
+1. Schema Registry
+2. REST Proxy
+3. Non-Java Clients
+4. Many packaged Kafka Connect plugins
+5. GUI for managing and monitoring Kafka - Confluent Control Center
+
+Some of these extensions are available in the community version of Confluent. Some are only available in the enterprise version.
+![TDengine Database Kafka Connector -- Confluent platform](kafka/confluentPlatform.webp)
+
+Confluent Enterprise Edition provides the `confluent` command-line tool to manage various components.
+
+## Prerequisites
+
+1. Linux operating system
+2. Java 8 and Maven installed
+3. Git installed
+4. TDengine installed and started. If not, please refer to [Installation and Uninstallation](/operation/pkg-install)
+
+## Install Confluent
+
+Confluent provides two installation methods: Docker and binary packages. This article only introduces binary package installation.
+
+Execute in any directory:
+
+```
+curl -O http://packages.confluent.io/archive/7.1/confluent-7.1.1.tar.gz
+tar xzf confluent-7.1.1.tar.gz -C /opt/
+```
+
+Then you need to add the `$CONFLUENT_HOME/bin` directory to the PATH.
+
+```title=".profile"
+export CONFLUENT_HOME=/opt/confluent-7.1.1
+PATH=$CONFLUENT_HOME/bin:$PATH
+export PATH
+```
+
+Users can append the above script to the current user's profile file (~/.profile or ~/.bash_profile).
+
+After the installation is complete, you can enter `confluent version` for simple verification:
+
+```
+# confluent version
+confluent - Confluent CLI
+
+Version: v2.6.1
+Git Ref: 6d920590
+Build Date: 2022-02-18T06:14:21Z
+Go Version: go1.17.6 (linux/amd64)
+Development: false
+```
+
+## Install TDengine Connector plugin
+
+### Install from source code
+
+```
+git clone https://github.com/taosdata/kafka-connect-tdengine.git
+cd kafka-connect-tdengine
+mvn clean package
+unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
+```
+
+The above script first clones the project source code and then compiles and packages it with Maven.
After the package is complete, the zip package of the plugin is generated in the `target/components/packages/` directory. Unzip this zip package to the plugin path. We used `$CONFLUENT_HOME/share/java/` above because it is a built-in plugin path.
+
+### Install with confluent-hub
+
+[Confluent Hub](https://www.confluent.io/hub) provides a service to download Kafka Connect plugins. After TDengine Kafka Connector is published to Confluent Hub, it can be installed using the command tool `confluent-hub`.
+**TDengine Kafka Connector is currently not officially released and cannot be installed in this way**.
+
+## Start Confluent
+
+```
+confluent local services start
+```
+
+:::note
+Be sure to install the plugin before starting Confluent. Otherwise, Kafka Connect will fail to discover the plugins.
+:::
+
+:::tip
+If a component fails to start, try clearing the data and restarting. The data directory will be printed to the console at startup, e.g.:
+
+```title="Console output log" {1}
+Using CONFLUENT_CURRENT: /tmp/confluent.106668
+Starting ZooKeeper
+ZooKeeper is [UP]
+Starting Kafka
+Kafka is [UP]
+Starting Schema Registry
+Schema Registry is [UP]
+Starting Kafka REST
+Kafka REST is [UP]
+Starting Connect
+Connect is [UP]
+Starting ksqlDB Server
+ksqlDB Server is [UP]
+Starting Control Center
+Control Center is [UP]
+```
+
+To clear data, execute `rm -rf /tmp/confluent.106668`.
+:::
+
+### Check Confluent Services Status
+
+Use the command below to check the status of all services:
+
+```
+confluent local services status
+```
+
+The expected output is:
+```
+Connect is [UP]
+Control Center is [UP]
+Kafka is [UP]
+Kafka REST is [UP]
+ksqlDB Server is [UP]
+Schema Registry is [UP]
+ZooKeeper is [UP]
+```
+
+### Check Successfully Loaded Plugin
+
+After Kafka Connect has started completely, you can use the command below to check whether the plugins are installed successfully:
+```
+confluent local services connect plugin list
+```
+
+The output should contain `TDengineSinkConnector` and `TDengineSourceConnector` as below:
+
+```
+Available Connect Plugins:
+[
+  {
+    "class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector",
+    "type": "sink",
+    "version": "1.0.0"
+  },
+  {
+    "class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
+    "type": "source",
+    "version": "1.0.0"
+  },
+......
+```
+
+If not, please check the log file of Kafka Connect. To view the log file path, please execute:
+
+```
+echo `cat /tmp/confluent.current`/connect/connect.stdout
+```
+It should produce a path like: `/tmp/confluent.104086/connect/connect.stdout`
+
+Besides the log file `connect.stdout`, there is a file named `connect.properties`. At the end of this file you can see the effective `plugin.path`, which is a series of paths joined by commas. If Kafka Connect did not find our plugins, it is probably because the installed path is not included in `plugin.path`.
+
+## The use of TDengine Sink Connector
+
+The role of the TDengine Sink Connector is to synchronize the data of the specified topic to TDengine. Users do not need to create databases and super tables in advance. The name of the target database can be specified manually (see the configuration parameter connection.database), or it can be generated according to specific rules (see the configuration parameter connection.database.prefix).
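+As a quick illustration of this naming behavior, the Python sketch below restates the documented resolution order (`connection.database` first, then `connection.database.prefix` with its `${topic}` placeholder, then the topic name itself). It is a reading of the rules in the configuration reference at the end of this page, not connector code.
+
+```python
+def target_database(topic: str, database: str = None, prefix: str = None) -> str:
+    # connection.database wins when it is set explicitly.
+    if database:
+        return database
+    # Otherwise connection.database.prefix applies; "${topic}" is replaced by
+    # the topic name, e.g. prefix "kafka_${topic}" + topic "orders" -> "kafka_orders".
+    if prefix:
+        return prefix.replace("${topic}", topic)
+    # With neither set, the target database is named after the topic.
+    return topic
+
+print(target_database("meters", database="power"))         # power
+print(target_database("orders", prefix="kafka_${topic}"))  # kafka_orders
+print(target_database("meters"))                           # meters
+```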
+
+TDengine Sink Connector internally uses the TDengine [schemaless write interface](/reference/connector/cpp#schemaless-writing-api) to write data to TDengine. It currently supports data in three formats: [InfluxDB line protocol format](/develop/insert-data/influxdb-line), [OpenTSDB Telnet protocol format](/develop/insert-data/opentsdb-telnet), and [OpenTSDB JSON protocol format](/develop/insert-data/opentsdb-json).
+
+The following example synchronizes the data of the topic meters to the target database power. The data format is the InfluxDB Line protocol format.
+
+### Add configuration file
+
+```
+mkdir ~/test
+cd ~/test
+vi sink-demo.properties
+```
+
+The content of sink-demo.properties is as follows:
+
+```ini title="sink-demo.properties"
+name=TDengineSinkConnector
+connector.class=com.taosdata.kafka.connect.sink.TDengineSinkConnector
+tasks.max=1
+topics=meters
+connection.url=jdbc:TAOS://127.0.0.1:6030
+connection.user=root
+connection.password=taosdata
+connection.database=power
+db.schemaless=line
+data.precision=ns
+key.converter=org.apache.kafka.connect.storage.StringConverter
+value.converter=org.apache.kafka.connect.storage.StringConverter
+```
+
+Key configuration instructions:
+
+1. `topics=meters` and `connection.database=power` mean to subscribe to the data of the topic meters and write it to the database power.
+2. `db.schemaless=line` means the data is in the InfluxDB Line protocol format.
+
+### Create Connector instance
+
+```
+confluent local services connect connector load TDengineSinkConnector --config ./sink-demo.properties
+```
+
+If the above command is executed successfully, the output is as follows:
+
+```json
+{
+  "name": "TDengineSinkConnector",
+  "config": {
+    "connection.database": "power",
+    "connection.password": "taosdata",
+    "connection.url": "jdbc:TAOS://127.0.0.1:6030",
+    "connection.user": "root",
+    "connector.class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector",
+    "data.precision": "ns",
+    "db.schemaless": "line",
+    "key.converter": "org.apache.kafka.connect.storage.StringConverter",
+    "tasks.max": "1",
+    "topics": "meters",
+    "value.converter": "org.apache.kafka.connect.storage.StringConverter",
+    "name": "TDengineSinkConnector"
+  },
+  "tasks": [],
+  "type": "sink"
+}
+```
+
+### Write test data
+
+Prepare a text file as test data; its content is as follows:
+
+```txt title="test-data.txt"
+meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000000
+meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250000000
+meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249000000
+meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250000000
+```
+
+Use kafka-console-producer to write test data to the topic `meters`.
+
+```
+cat test-data.txt | kafka-console-producer --broker-list localhost:9092 --topic meters
+```
+
+:::note
+TDengine Sink Connector will automatically create the database if the target database does not exist. The time precision used to create the database automatically is nanoseconds, which requires that the timestamp precision of the written data is also nanoseconds. An exception will be thrown if the timestamp precision of the written data is not nanoseconds.
+:::
+
+### Verify that the sync was successful
+
+Use the TDengine CLI to verify that the sync was successful.
+
+```
+taos> use power;
+Database changed.
+
+taos> select * from meters;
+ ts | current | voltage | phase | groupid | location |
+===============================================================================================================================================================
+ 2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles |
+ 2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles |
+ 2022-03-28 09:56:51.249000000 | 10.800000000 | 223.000000000 | 0.290000000 | 3 | California.LosAngeles |
+ 2022-03-28 09:56:51.250000000 | 11.300000000 | 221.000000000 | 0.350000000 | 3 | California.LosAngeles |
+Query OK, 4 row(s) in set (0.004208s)
+```
+
+If you see the above data, the synchronization is successful. If not, check the logs of Kafka Connect. For a detailed description of configuration parameters, see [Configuration Reference](#configuration-reference).
+
+## The use of TDengine Source Connector
+
+The role of the TDengine Source Connector is to push all the data of a specific TDengine database after a particular time to Kafka. The implementation principle of TDengine Source Connector is to first pull historical data in batches and then keep incremental data in sync by polling with regular queries. At the same time, changes to the tables are monitored, and newly added tables can be synchronized automatically. If Kafka Connect is restarted, synchronization will resume where it left off.
+
+TDengine Source Connector will convert the data in a TDengine data table into [InfluxDB Line protocol format](/develop/insert-data/influxdb-line/) or [OpenTSDB JSON protocol format](/develop/insert-data/opentsdb-json) and then write it to Kafka.
+
+The following sample program synchronizes the data in the database test to the topic tdengine-source-test.
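+Before the walkthrough, a small Python sketch makes the conversion described above concrete: it formats one row of the meters table as an InfluxDB-style line, using the type-suffix convention (`i32`, `f32`) that appears in the consumer output later in this section. It is illustrative only, not the connector's implementation.
+
+```python
+def to_line_protocol(stable: str, tags: dict, fields: dict, ts_ns: int) -> str:
+    # measurement,tag_set field_set timestamp -- the schemaless line layout.
+    tag_set = ",".join(f"{k}={v}" for k, v in tags.items())
+    field_set = ",".join(f"{k}={v}" for k, v in fields.items())
+    return f"{stable},{tag_set} {field_set} {ts_ns}"
+
+print(to_line_protocol(
+    "meters",
+    {"location": '"California.SanFrancisco"', "groupid": "2i32"},
+    {"current": "10.3f32", "voltage": "219i32", "phase": "0.31f32"},
+    1538548685000000000,
+))
+# meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000
+```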
+
+### Add configuration file
+
+```
+vi source-demo.properties
+```
+
+Enter the following content:
+
+```ini title="source-demo.properties"
+name=TDengineSourceConnector
+connector.class=com.taosdata.kafka.connect.source.TDengineSourceConnector
+tasks.max=1
+connection.url=jdbc:TAOS://127.0.0.1:6030
+connection.username=root
+connection.password=taosdata
+connection.database=test
+connection.attempts=3
+connection.backoff.ms=5000
+topic.prefix=tdengine-source-
+poll.interval.ms=1000
+fetch.max.rows=100
+out.format=line
+key.converter=org.apache.kafka.connect.storage.StringConverter
+value.converter=org.apache.kafka.connect.storage.StringConverter
+```
+
+### Prepare test data
+
+Prepare an SQL script file to generate the test data:
+
+```sql title="prepare-source-data.sql"
+DROP DATABASE IF EXISTS test;
+CREATE DATABASE test;
+USE test;
+CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
+INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) d1002 USING meters TAGS('California.SanFrancisco', 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) d1003 USING meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) d1003 USING meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) d1004 USING meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) d1004 USING meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000);
+```
+
+Use the TDengine CLI to execute the SQL script:
+
+```
+taos -f prepare-source-data.sql
+```
+
+### Create Connector instance
+
+```
+confluent local services connect connector load TDengineSourceConnector --config source-demo.properties
+```
+
+### View topic data
+
+Use the kafka-console-consumer command-line tool to monitor data in the topic tdengine-source-test. In the beginning, all historical data will be output. After two new rows of data are inserted into TDengine, kafka-console-consumer immediately outputs them.
+
+```
+kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test
+```
+
+output:
+
+```
+......
+meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000
+meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000
+......
+```
+
+All historical data is displayed. Switch to the TDengine CLI and insert two new pieces of data:
+
+```
+USE test;
+INSERT INTO d1001 VALUES (now, 13.3, 229, 0.38);
+INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22);
+```
+
+Switch back to kafka-console-consumer; the command-line window now prints the two pieces of data just inserted.
+
+### Unload plugin
+
+After testing, use the unload command to stop the loaded connectors.
+
+View currently active connectors:
+
+```
+confluent local services connect connector status
+```
+
+You should now have two active connectors if you followed the previous steps.
Use the following commands to unload them:
+
+```
+confluent local services connect connector unload TDengineSinkConnector
+confluent local services connect connector unload TDengineSourceConnector
+```
+
+## Configuration reference
+
+### General configuration
+
+The following configuration items apply to TDengine Sink Connector and TDengine Source Connector.
+
+1. `name`: The name of the connector.
+2. `connector.class`: The full class name of the connector, for example: com.taosdata.kafka.connect.sink.TDengineSinkConnector.
+3. `tasks.max`: The maximum number of tasks, the default is 1.
+4. `topics`: A list of topics to be synchronized, separated by commas, such as `topic1,topic2`.
+5. `connection.url`: TDengine JDBC connection string, such as `jdbc:TAOS://127.0.0.1:6030`.
+6. `connection.user`: TDengine username, default root.
+7. `connection.password`: TDengine user password, default taosdata.
+8. `connection.attempts`: The maximum number of connection attempts. Default 3.
+9. `connection.backoff.ms`: The retry interval for connection creation failure, the unit is ms. Default is 5000.
+
+### TDengine Sink Connector specific configuration
+
+1. `connection.database`: The name of the target database. If the specified database does not exist, it will be created automatically. The time precision used for automatic database creation is nanoseconds. The default value is null. When it is null, refer to the description of the `connection.database.prefix` parameter for the naming rules of the target database.
+2. `connection.database.prefix`: When `connection.database` is null, the prefix of the target database. Can contain the placeholder '${topic}'. For example, with the prefix kafka_${topic}, data for the topic 'orders' will be written to the database 'kafka_orders'. Default null. When null, the name of the target database is the same as the name of the topic.
+3. `batch.size`: The number of records written per batch. When the sink connector receives more data than this value at one time, it will write it in several batches.
+4. `max.retries`: The maximum number of retries when an error occurs. Defaults to 1.
+5. `retry.backoff.ms`: The time interval between retries when an error occurs. The unit is milliseconds. The default is 3000.
+6. `db.schemaless`: Data format, could be one of `line`, `json`, and `telnet`, representing the InfluxDB line protocol format, the OpenTSDB JSON format, and the OpenTSDB Telnet line protocol format respectively.
+7. `data.precision`: The time precision when using the InfluxDB line protocol format, could be one of `ms`, `us` and `ns`. The default is `ns`.
+
+### TDengine Source Connector specific configuration
+
+1. `connection.database`: source database name, no default value.
+2. `topic.prefix`: topic name prefix after data is imported into kafka. Use `topic.prefix` + `connection.database` name as the full topic name. Defaults to the empty string "".
+3. `timestamp.initial`: Data synchronization start time. The format is 'yyyy-MM-dd HH:mm:ss'. Default "1970-01-01 00:00:00".
+4. `poll.interval.ms`: Pull data interval, the unit is ms. Default is 1000.
+5. `fetch.max.rows`: The maximum number of rows retrieved per query from the database. Default is 100.
+6. `out.format`: The data format. The value could be line or json. `line` represents the InfluxDB Line protocol format, and `json` represents the OpenTSDB JSON format. Default is `line`.
+
+
+## Other notes
+
+1. 
To install plugin to a customized location, refer to https://docs.confluent.io/home/connect/self-managed/install.html#install-connector-manually. +2. To use Kafka Connect without confluent, refer to https://kafka.apache.org/documentation/#connect. + +## Feedback + +https://github.com/taosdata/kafka-connect-tdengine/issues + +## Reference + +1. https://www.confluent.io/what-is-apache-kafka +2. https://developer.confluent.io/learn-kafka/kafka-connect/intro +3. https://docs.confluent.io/platform/current/platform.html diff --git a/docs-en/20-third-party/emqx/add-action-handler.png b/docs-en/20-third-party/emqx/add-action-handler.png deleted file mode 100644 index 97a1f933ecfadfcab399938806d73c5a5ecc6427..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/emqx/add-action-handler.png and /dev/null differ diff --git a/docs-en/20-third-party/emqx/add-action-handler.webp b/docs-en/20-third-party/emqx/add-action-handler.webp new file mode 100644 index 0000000000000000000000000000000000000000..4a8d105f711991226cfbd43b6e9ab07d7ccc686a Binary files /dev/null and b/docs-en/20-third-party/emqx/add-action-handler.webp differ diff --git a/docs-en/20-third-party/emqx/check-result-in-taos.png b/docs-en/20-third-party/emqx/check-result-in-taos.png deleted file mode 100644 index c17a5c1ea2b9bbd49263056c8bf09c9aabab07d5..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/emqx/check-result-in-taos.png and /dev/null differ diff --git a/docs-en/20-third-party/emqx/check-result-in-taos.webp b/docs-en/20-third-party/emqx/check-result-in-taos.webp new file mode 100644 index 0000000000000000000000000000000000000000..8fa040a86104fece02ddaf8986f0a67de316143d Binary files /dev/null and b/docs-en/20-third-party/emqx/check-result-in-taos.webp differ diff --git a/docs-en/20-third-party/emqx/check-rule-matched.png b/docs-en/20-third-party/emqx/check-rule-matched.png deleted file mode 100644 index 9e9a466946a1afa857e2bbc07b14956dd0f984b6..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/emqx/check-rule-matched.png and /dev/null differ diff --git a/docs-en/20-third-party/emqx/check-rule-matched.webp b/docs-en/20-third-party/emqx/check-rule-matched.webp new file mode 100644 index 0000000000000000000000000000000000000000..e5a614035739df859b27c817f3b9f41be444b513 Binary files /dev/null and b/docs-en/20-third-party/emqx/check-rule-matched.webp differ diff --git a/docs-en/20-third-party/emqx/client-num.png b/docs-en/20-third-party/emqx/client-num.png deleted file mode 100644 index fff48cbf3b271c367079ddde425b3f9b014062f7..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/emqx/client-num.png and /dev/null differ diff --git a/docs-en/20-third-party/emqx/client-num.webp b/docs-en/20-third-party/emqx/client-num.webp new file mode 100644 index 0000000000000000000000000000000000000000..a151b184843607d67b649babb3145bfb3e329cda Binary files /dev/null and b/docs-en/20-third-party/emqx/client-num.webp differ diff --git a/docs-en/20-third-party/emqx/create-resource.png b/docs-en/20-third-party/emqx/create-resource.png deleted file mode 100644 index 58da4c391a3692b9f5fa348d952701eab8bcb746..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/emqx/create-resource.png and /dev/null differ diff --git a/docs-en/20-third-party/emqx/create-resource.webp b/docs-en/20-third-party/emqx/create-resource.webp new file mode 100644 index 
0000000000000000000000000000000000000000..bf9cccbe49c57f925c5e6b094a4c0d88a64242cb Binary files /dev/null and b/docs-en/20-third-party/emqx/create-resource.webp differ diff --git a/docs-en/20-third-party/emqx/create-rule.png b/docs-en/20-third-party/emqx/create-rule.png deleted file mode 100644 index 73b0b6ee3e6065a142df98abe8c0dbb32b34f89d..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/emqx/create-rule.png and /dev/null differ diff --git a/docs-en/20-third-party/emqx/create-rule.webp b/docs-en/20-third-party/emqx/create-rule.webp new file mode 100644 index 0000000000000000000000000000000000000000..13e8fc83d48d2fd9d0a303c707ef3024d3ee5203 Binary files /dev/null and b/docs-en/20-third-party/emqx/create-rule.webp differ diff --git a/docs-en/20-third-party/emqx/edit-action.png b/docs-en/20-third-party/emqx/edit-action.png deleted file mode 100644 index 2a43ee369a439cf11cee23c11f25d6a84b26d7dc..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/emqx/edit-action.png and /dev/null differ diff --git a/docs-en/20-third-party/emqx/edit-action.webp b/docs-en/20-third-party/emqx/edit-action.webp new file mode 100644 index 0000000000000000000000000000000000000000..7f6d2e36a82b1917930e5d3969115db9359674a0 Binary files /dev/null and b/docs-en/20-third-party/emqx/edit-action.webp differ diff --git a/docs-en/20-third-party/emqx/edit-resource.png b/docs-en/20-third-party/emqx/edit-resource.png deleted file mode 100644 index 0a0b3560044f4ed6e0a8f040b74085a7e8948b1f..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/emqx/edit-resource.png and /dev/null differ diff --git a/docs-en/20-third-party/emqx/edit-resource.webp b/docs-en/20-third-party/emqx/edit-resource.webp new file mode 100644 index 0000000000000000000000000000000000000000..fd5d278fab16bba4e04e1c348d4086dce77abb98 Binary files /dev/null and b/docs-en/20-third-party/emqx/edit-resource.webp differ diff --git a/docs-en/20-third-party/emqx/login-dashboard.png b/docs-en/20-third-party/emqx/login-dashboard.png deleted file mode 100644 index d6c5035c98d860faf639ef6611c6719adf80c47b..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/emqx/login-dashboard.png and /dev/null differ diff --git a/docs-en/20-third-party/emqx/login-dashboard.webp b/docs-en/20-third-party/emqx/login-dashboard.webp new file mode 100644 index 0000000000000000000000000000000000000000..f84cee668fb6efe1586515ba0dee3ae2f10a5b30 Binary files /dev/null and b/docs-en/20-third-party/emqx/login-dashboard.webp differ diff --git a/docs-en/20-third-party/emqx/rule-engine.png b/docs-en/20-third-party/emqx/rule-engine.png deleted file mode 100644 index db110a837b024c82ee9d22f02dcd3a9d06abdd55..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/emqx/rule-engine.png and /dev/null differ diff --git a/docs-en/20-third-party/emqx/rule-engine.webp b/docs-en/20-third-party/emqx/rule-engine.webp new file mode 100644 index 0000000000000000000000000000000000000000..c1711c8cc757cd73fef5cb941a1818756241f7f0 Binary files /dev/null and b/docs-en/20-third-party/emqx/rule-engine.webp differ diff --git a/docs-en/20-third-party/emqx/rule-header-key-value.png b/docs-en/20-third-party/emqx/rule-header-key-value.png deleted file mode 100644 index b81b9a9684aa2f98d00b7ec21e5de411fb450312..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/emqx/rule-header-key-value.png and /dev/null differ diff --git 
a/docs-en/20-third-party/emqx/rule-header-key-value.webp b/docs-en/20-third-party/emqx/rule-header-key-value.webp new file mode 100644 index 0000000000000000000000000000000000000000..e645b3822dffec86f4926e78a57eaffa1e7f4d8d Binary files /dev/null and b/docs-en/20-third-party/emqx/rule-header-key-value.webp differ diff --git a/docs-en/20-third-party/emqx/run-mock.png b/docs-en/20-third-party/emqx/run-mock.png deleted file mode 100644 index 0da25818575247732d5d3a783aa52cf7ce24662c..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/emqx/run-mock.png and /dev/null differ diff --git a/docs-en/20-third-party/emqx/run-mock.webp b/docs-en/20-third-party/emqx/run-mock.webp new file mode 100644 index 0000000000000000000000000000000000000000..ed33f1666d456f1ab40ed6830af4550d4c7ca037 Binary files /dev/null and b/docs-en/20-third-party/emqx/run-mock.webp differ diff --git a/docs-en/20-third-party/grafana/add_datasource1.jpg b/docs-en/20-third-party/grafana/add_datasource1.jpg deleted file mode 100644 index 1f0f5110f312c57f3ec1788bbc02f04fac6ac142..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/grafana/add_datasource1.jpg and /dev/null differ diff --git a/docs-en/20-third-party/grafana/add_datasource1.webp b/docs-en/20-third-party/grafana/add_datasource1.webp new file mode 100644 index 0000000000000000000000000000000000000000..211edc4457abd0db6b0ef64636d61d65b5f43db6 Binary files /dev/null and b/docs-en/20-third-party/grafana/add_datasource1.webp differ diff --git a/docs-en/20-third-party/grafana/add_datasource2.jpg b/docs-en/20-third-party/grafana/add_datasource2.jpg deleted file mode 100644 index fa7a83e00e96fae649910dff4edf5f5bdadd7850..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/grafana/add_datasource2.jpg and /dev/null differ diff --git a/docs-en/20-third-party/grafana/add_datasource2.webp b/docs-en/20-third-party/grafana/add_datasource2.webp new file mode 100644 index 0000000000000000000000000000000000000000..8ab547231fee4d3b0874fcfe08c0ce152b0c53a1 Binary files /dev/null and b/docs-en/20-third-party/grafana/add_datasource2.webp differ diff --git a/docs-en/20-third-party/grafana/add_datasource3.jpg b/docs-en/20-third-party/grafana/add_datasource3.jpg deleted file mode 100644 index fc850ad08ff1174de972906842e0d5ee64e6e5cb..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/grafana/add_datasource3.jpg and /dev/null differ diff --git a/docs-en/20-third-party/grafana/add_datasource3.webp b/docs-en/20-third-party/grafana/add_datasource3.webp new file mode 100644 index 0000000000000000000000000000000000000000..d8a733360a09b4425c571f254a9ecb298c04b72f Binary files /dev/null and b/docs-en/20-third-party/grafana/add_datasource3.webp differ diff --git a/docs-en/20-third-party/grafana/add_datasource4.jpg b/docs-en/20-third-party/grafana/add_datasource4.jpg deleted file mode 100644 index 3ba73e50d455111f8621f4165746078554c2d790..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/grafana/add_datasource4.jpg and /dev/null differ diff --git a/docs-en/20-third-party/grafana/add_datasource4.webp b/docs-en/20-third-party/grafana/add_datasource4.webp new file mode 100644 index 0000000000000000000000000000000000000000..b1e0fc6e2b27df4af1bb5ad92756bcb5d4fda63e Binary files /dev/null and b/docs-en/20-third-party/grafana/add_datasource4.webp differ diff --git a/docs-en/20-third-party/grafana/create_dashboard1.jpg b/docs-en/20-third-party/grafana/create_dashboard1.jpg 
deleted file mode 100644 index 3b83c3a1714e9e7540e0b06239ef7c1c4f63fe2c..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/grafana/create_dashboard1.jpg and /dev/null differ diff --git a/docs-en/20-third-party/grafana/create_dashboard1.webp b/docs-en/20-third-party/grafana/create_dashboard1.webp new file mode 100644 index 0000000000000000000000000000000000000000..55eb388833e4df2a46f4d1cf6d346aa11429385d Binary files /dev/null and b/docs-en/20-third-party/grafana/create_dashboard1.webp differ diff --git a/docs-en/20-third-party/grafana/create_dashboard2.jpg b/docs-en/20-third-party/grafana/create_dashboard2.jpg deleted file mode 100644 index fe5d768ac55254251e0290bf257178f5ff28f5a5..0000000000000000000000000000000000000000 Binary files a/docs-en/20-third-party/grafana/create_dashboard2.jpg and /dev/null differ diff --git a/docs-en/20-third-party/grafana/create_dashboard2.webp b/docs-en/20-third-party/grafana/create_dashboard2.webp new file mode 100644 index 0000000000000000000000000000000000000000..bb40e407187718c52e9f617d8ebd3d25fd14b56b Binary files /dev/null and b/docs-en/20-third-party/grafana/create_dashboard2.webp differ diff --git a/docs-en/20-third-party/index.md b/docs-en/20-third-party/index.md index 45f70abc844eb3022e786fbf023b82fc06f22367..87bd9e075133d1182ee93d1c1c43617c766755b9 100644 --- a/docs-en/20-third-party/index.md +++ b/docs-en/20-third-party/index.md @@ -2,11 +2,11 @@ title: Third Party Tools --- -TDengine's support for standard SQL commands, common database connector standards (e.g., JDBC), ORM, and other popular time-series database writing protocols (e.g., InfluxDB Line Protocol, OpenTSDB JSON, OpenTSDB Telnet, etc.) makes TDengine very easy to use with third-party tools. +Since TDengine supports standard SQL commands, common database connector standards (e.g., JDBC), ORM, and other popular time-series database writing protocols (e.g., InfluxDB Line Protocol, OpenTSDB JSON, OpenTSDB Telnet, etc.), it is very easy to integrate TDengine with other third-party tools. You only need to provide simple configuration, and the integration can be done without a single line of code.
```mdx-code-block import DocCardList from '@theme/DocCardList'; import {useCurrentSidebarCategory} from '@docusaurus/theme-common'; -``` \ No newline at end of file +``` diff --git a/docs-en/20-third-party/kafka/Kafka_Connect.webp b/docs-en/20-third-party/kafka/Kafka_Connect.webp new file mode 100644 index 0000000000000000000000000000000000000000..8f2000a749b0a2ccec9939abd144c53c44fbe171 Binary files /dev/null and b/docs-en/20-third-party/kafka/Kafka_Connect.webp differ diff --git a/docs-en/20-third-party/kafka/confluentPlatform.webp b/docs-en/20-third-party/kafka/confluentPlatform.webp new file mode 100644 index 0000000000000000000000000000000000000000..ff03d4e51aaaec85f07ff41ecda0fb9bd6cb2847 Binary files /dev/null and b/docs-en/20-third-party/kafka/confluentPlatform.webp differ diff --git a/docs-en/20-third-party/kafka/streaming-integration-with-kafka-connect.webp b/docs-en/20-third-party/kafka/streaming-integration-with-kafka-connect.webp new file mode 100644 index 0000000000000000000000000000000000000000..120d534ec132cea2ccef6cf87a3ce680a5ac6e9c Binary files /dev/null and b/docs-en/20-third-party/kafka/streaming-integration-with-kafka-connect.webp differ diff --git a/docs-en/21-tdinternal/01-arch.md b/docs-en/21-tdinternal/01-arch.md index 9607c9b38709f6a320f82a8ee250afb407492627..4d8bed4d2d6b3a0404e10213aeab599767325cc2 100644 --- a/docs-en/21-tdinternal/01-arch.md +++ b/docs-en/21-tdinternal/01-arch.md @@ -5,38 +5,38 @@ title: Architecture ## Cluster and Primary Logic Unit -The design of TDengine is based on the assumption that any hardware or software system is not 100% reliable and that no single node can provide sufficient computing and storage resources to process massive data. Therefore, TDengine has been designed in a distributed and high-reliability architecture since day one of the development, so that hardware failure or software failure of any single even multiple servers will not affect the availability and reliability of the system. At the same time, through node virtualization and automatic load-balancing technology, TDengine can make the most efficient use of computing and storage resources in heterogeneous clusters to reduce hardware resources significantly. +The design of TDengine is based on the assumption that any hardware or software system is not 100% reliable and that no single node can provide sufficient computing and storage resources to process massive data. Therefore, since day one, TDengine has been designed as a natively distributed system with a high-reliability architecture. Hardware failure or software failure of a single server, or even multiple servers, will not affect the availability and reliability of the system. At the same time, through node virtualization and automatic load-balancing technology, TDengine can make the most efficient use of computing and storage resources in heterogeneous clusters to reduce hardware resource needs significantly. ### Primary Logic Unit -Logical structure diagram of TDengine distributed architecture as following: +The logical structure diagram of TDengine's distributed architecture is as follows: -![TDengine architecture diagram](structure.png) +![TDengine Database architecture diagram](structure.webp)
Figure 1: TDengine architecture diagram
A complete TDengine system runs on one or more physical nodes. Logically, it includes data node (dnode), TDengine client driver (TAOSC) and application (app). There are one or more data nodes in the system, which form a cluster. The application interacts with the TDengine cluster through TAOSC's API. The following is a brief introduction to each logical unit. **Physical node (pnode)**: A pnode is a computer that runs independently and has its own computing, storage and network capabilities. It can be a physical machine, a virtual machine, or a Docker container with an OS installed. The physical node is identified by its configured FQDN (Fully Qualified Domain Name). TDengine relies entirely on FQDN for network communication. If you don't know about FQDN, please check [wikipedia](https://en.wikipedia.org/wiki/Fully_qualified_domain_name). -**Data node (dnode):** A dnode is a running instance of the TDengine server-side execution code taosd on a physical node. A working system must have at least one data node. A dnode contains zero to multiple logical virtual nodes (VNODE), zero or at most one logical management node (mnode). The unique identification of a dnode in the system is determined by the instance's End Point (EP). EP is a combination of FQDN (Fully Qualified Domain Name) of the physical node where the dnode is located and the network port number (Port) configured by the system. By configuring different ports, a physical node (a physical machine, virtual machine or container) can run multiple instances or have multiple data nodes. +**Data node (dnode):** A dnode is a running instance of the TDengine server-side execution code taosd on a physical node (pnode). A working system must have at least one data node. A dnode contains zero to multiple logical virtual nodes (VNODE) and zero or at most one logical management node (mnode). The unique identification of a dnode in the system is determined by the instance's End Point (EP). EP is a combination of FQDN (Fully Qualified Domain Name) of the physical node where the dnode is located and the network port number (Port) configured by the system. By configuring different ports, a physical node (a physical machine, virtual machine or container) can run multiple instances or have multiple data nodes. -**Virtual node (vnode)**: To better support data sharding, load balancing and prevent data from overheating or skewing, data nodes are virtualized into multiple virtual nodes (vnode, V2, V3, V4, etc. in the figure). Each vnode is a relatively independent work unit, which is the basic unit of time-series data storage and has independent running threads, memory space and persistent storage path. A vnode contains a certain number of tables (data collection points). When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a data node depends on the hardware capacities of the physical node where the data node is located. A vnode belongs to only one DB, but a DB can have multiple vnodes. In addition to the stored time-series data, a vnode also stores the schema and tag values of the included tables. A virtual node is uniquely identified in the system by the EP of the data node and the VGroup ID to which it belongs and is created and managed by the management node. +**Virtual node (vnode)**: To better support data sharding, load balancing and prevent data from overheating or skewing, data nodes are virtualized into multiple virtual nodes (vnode, V2, V3, V4, etc. in the figure).
Each vnode is a relatively independent work unit, which is the basic unit of time-series data storage and has independent running threads, memory space and persistent storage path. A vnode contains a certain number of tables (data collection points). When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a data node depends on the capacity of the hardware of the physical node where the data node is located. A vnode belongs to only one DB, but a DB can have multiple vnodes. In addition to the stored time-series data, a vnode also stores the schema and tag values of the included tables. A virtual node is uniquely identified in the system by the EP of the data node and the VGroup ID to which it belongs and is created and managed by the management node. -**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in the figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 5) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The master/slave mechanism is adopted for the mnode group and the data synchronization is carried out in a strongly consistent way. Any data update operation can only be executed on the master. The creation of mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located through internal messaging interaction. +**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in the figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 5) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The master/slave mechanism is adopted for the mnode group and the data synchronization is carried out in a strongly consistent way. Any data update operation can only be executed on the master. The creation of mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located, through internal messaging interaction. -**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed in a master/slave mechanism. Write operations can only be performed on the master vnode, and then replicated to slave vnodes, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. 
If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `“replica”` when creating DB, and the default is 1. Using the multi-replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. Virtual node group is created and managed by the management node, and the management node assigns a system unique ID, aka VGroup ID. If two virtual nodes have the same vnode group ID, means that they belong to the same group and the data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed, allowing only one, that is, no data replication. VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused. +**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed in a master/slave mechanism. Write operations can only be performed on the master vnode, and then replicated to slave vnodes, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `“replica”` when creating a DB, and the default is 1. Using the multi-replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. Virtual node groups are created and managed by the management node, and the management node assigns a system unique ID, aka VGroup ID. If two virtual nodes have the same vnode group ID, it means that they belong to the same group and the data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed; it can be as low as one, in which case there is no data replication. VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused. -**TAOSC**: TAOSC is the driver provided by TDengine to applications, which is responsible for dealing with the interaction between application and cluster, and provides the native interface of C/C++ language, which is embedded in JDBC, C #, Python, Go, Node.js language connection libraries. Applications interact with the whole cluster through TAOSC instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; when returning the results to the application, TAOSC also needs to be responsible for the final level of aggregation, sorting, filtering and other operations. For JDBC, C/C++/C #/Python/Go/Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, TAOSC has a running instance on each dnode of TDengine cluster. +**TAOSC**: TAOSC is the driver provided by TDengine to applications. It is responsible for dealing with the interaction between application and cluster, and provides the native interface for the C/C++ language. It is also embedded in the JDBC, C#, Python, Go, Node.js language connection libraries.
Applications interact with the whole cluster through TAOSC instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; when returning the results to the application, TAOSC also needs to be responsible for the final level of aggregation, sorting, filtering and other operations. For JDBC, C/C++/C#/Python/Go/Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, TAOSC has a running instance on each dnode of TDengine cluster. ### Node Communication -**Communication mode**: The communication among each data node of TDengine system, and among the client driver and each data node is carried out through TCP/UDP. Considering an IoT scenario, the data writing packets are generally not large, so TDengine uses UDP in addition to TCP for transmission, because UDP is more efficient and is not limited by the number of connections. TDengine implements its own timeout, retransmission, confirmation and other mechanisms to ensure reliable transmission of UDP. For packets with a data volume of less than 15K, UDP is adopted for transmission, and TCP is automatically adopted for transmission of packets with a data volume of more than 15K or query operations. At the same time, TDengine will automatically compress/decompress the data, digital sign/authenticate the data according to the configuration and data packet. For data replication among data nodes, only TCP is used for data transportation. +**Communication mode**: The communication among the data nodes of the TDengine system, and between the client driver and each data node, is carried out through TCP/UDP. Considering an IoT scenario, the data writing packets are generally not large, so TDengine uses UDP in addition to TCP for transmission, because UDP is more efficient and is not limited by the number of connections. TDengine implements its own timeout, retransmission, confirmation and other mechanisms to ensure reliable transmission of UDP. For packets with a data volume of less than 15K, UDP is adopted for transmission, and TCP is automatically adopted for transmission of packets with a data volume of more than 15K or query operations. At the same time, TDengine will automatically compress/decompress the data, digitally sign/authenticate the data according to the configuration and data packet. For data replication among data nodes, only TCP is used for data transportation. **FQDN configuration:** A data node has one or more FQDNs, which can be specified in the system configuration file taos.cfg with the parameter “fqdn”. If it is not specified, the system will automatically use the hostname of the computer as its FQDN. If the node is not configured with FQDN, you can directly set the configuration parameter “fqdn” of the node to its IP address. However, IP is not recommended because IP address may be changed, and once it changes, the cluster will not work properly. The EP (End Point) of a data node consists of FQDN + Port. With FQDN, it is necessary to ensure the DNS service is running, or hosts files on nodes are configured properly. **Port configuration**: The external port of a data node is determined by the system configuration parameter “serverPort” in TDengine, and the port for internal communication of cluster is serverPort+5.
The data replication operation among data nodes in the cluster also occupies a TCP port, which is serverPort+10. In order to support multithreading and efficient processing of UDP data, each internal and external UDP connection needs to occupy 5 consecutive ports. Therefore, the total port range of a data node will be serverPort to serverPort + 10, for a total of 11 TCP/UDP ports. To run the system, make sure that the firewall keeps these ports open. Each data node can be configured with a different serverPort. -**Cluster external connection**: TDengine cluster can accommodate one single, multiple or even thousands of data nodes. The application only needs to initiate a connection to any data node in the cluster. The network parameter required for connection is the End Point (FQDN plus configured port number) of a data node. When starting the application taos through CLI, the FQDN of the data node can be specified through the option `-h`, and the configured port number can be specified through `-p`. If the port is not configured, the system configuration parameter “serverPort” of TDengine will be adopted. +**Cluster external connection**: TDengine cluster can accommodate a single, multiple or even thousands of data nodes. The application only needs to initiate a connection to any data node in the cluster. The network parameter required for connection is the End Point (FQDN plus configured port number) of a data node. When starting the application taos through CLI, the FQDN of the data node can be specified through the option `-h`, and the configured port number can be specified through `-p`. If the port is not configured, the system configuration parameter “serverPort” of TDengine will be adopted. **Inter-cluster communication**: Data nodes connect with each other through TCP/UDP. When a data node starts, it will obtain the EP information of the dnode where the mnode is located, and then establish a connection with the mnode in the system to exchange information. There are three steps to obtain EP information of the mnode: @@ -44,31 +44,33 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc 2. Check the system configuration file taos.cfg to obtain node configuration parameters “firstEp” and “secondEp” (the node specified by these two parameters can be a normal node without mnode, in this case, the node will try to redirect to the mnode node when connected). If these two configuration parameters do not exist in taos.cfg, or are invalid, skip to the third step; 3. Set your own EP as a mnode EP and run it independently. After obtaining the mnode EP list, the data node initiates the connection. It will successfully join the working cluster after connection. If not successful, it will try the next item in the mnode EP list. If all attempts are made, but the connection still fails, sleep for a few seconds before trying again. -**The choice of MNODE**: TDengine logically has a management node, but there is no separated execution code. The server-side only has a set of execution code taosd. So which data node will be the management node? This is determined automatically by the system without any manual intervention. The principle is as follows: when a data node starts, it will check its End Point and compare it with the obtained mnode EP List. If its EP exists in it, the data node shall start the mnode module and become a mnode. If your own EP is not in the mnode EP List, the mnode module will not start.
During the system operation, due to load balancing, downtime and other reasons, mnode may migrate to the new dnode, while totally transparent without manual intervention. The modification of configuration parameters is the decision made by mnode itself according to resources usage. +**The choice of MNODE**: TDengine logically has a management node, but there is no separate execution code. The server-side only has one set of execution code, taosd. So which data node will be the management node? This is determined automatically by the system without any manual intervention. The principle is as follows: when a data node starts, it will check its End Point and compare it with the obtained mnode EP List. If its EP exists in it, the data node shall start the mnode module and become a mnode. If your own EP is not in the mnode EP List, the mnode module will not start. During the system operation, due to load balancing, downtime and other reasons, mnode may migrate to the new dnode, totally transparently and without manual intervention. The modification of configuration parameters is the decision made by mnode itself according to resources usage. -**Add new data nodes:** After the system has a data node, it has become a working system. There are two steps to add a new node into the cluster. Step1: Connect to the existing working data node using TDengine CLI, and then add the End Point of the new data node with the command "create dnode"; Step 2: In the system configuration parameter file taos.cfg of the new data node, set the “firstEp” and “secondEp” parameters to the EP of any two data nodes in the existing cluster. Please refer to the detailed user tutorial for detailed steps. In this way, the cluster will be established step by step. +**Add new data nodes:** After the system has a data node, it has become a working system. There are two steps to add a new node into the cluster. +- Step 1: Connect to the existing working data node using TDengine CLI, and then add the End Point of the new data node with the command "create dnode". +- Step 2: In the system configuration parameter file taos.cfg of the new data node, set the “firstEp” and “secondEp” parameters to the EP of any two data nodes in the existing cluster. Please refer to the user tutorial for detailed steps. In this way, the cluster will be established step by step. -**Redirection**: No matter about dnode or TAOSC, the connection to the mnode shall be initiated first, but the mnode is automatically created and maintained by the system, so the user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Because any running dnode maintains the currently running mnode EP List, when receiving a connecting request from the newly started dnode or TAOSC, if it’s not a mnode by self, it will reply to the mnode EP List back. After receiving this list, TAOSC or the newly started dnode will try to establish the connection again. When the mnode EP List changes, each data node quickly obtains the latest list and notifies TAOSC through messaging interaction among nodes. +**Redirection**: Regardless of dnode or TAOSC, the connection to the mnode is initiated first. The mnode is automatically created and maintained by the system, so the user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system.
Because any running dnode maintains the currently running mnode EP List, when receiving a connecting request from the newly started dnode or TAOSC, if it’s not an mnode itself, it will reply with the mnode EP List. After receiving this list, TAOSC or the newly started dnode will try to establish the connection again. When the mnode EP List changes, each data node quickly obtains the latest list and notifies TAOSC through messaging interaction among nodes. ### A Typical Data Writing Process To explain the relationship between vnode, mnode, TAOSC and application and their respective roles, the following is an analysis of a typical data writing process. -![typical process of TDengine](message.png) +![typical process of TDengine Database](message.webp)
Figure 2: Typical process of TDengine
1. Application initiates a request to insert data through JDBC, ODBC, or other APIs. -2. TAOSC checks if meta data existing for the table in the cache. If so, go straight to Step 4. If not, TAOSC sends a get meta-data request to mnode. +2. TAOSC checks the cache to see if meta data exists for the table. If it does, it goes straight to Step 4. If not, TAOSC sends a get meta-data request to mnode. 3. Mnode returns the meta-data of the table to TAOSC. Meta-data contains the schema of the table, and also the vgroup information to which the table belongs (the vnode ID and the End Point of the dnode where the table belongs. If the number of replicas is N, there will be N groups of End Points). If TAOSC does not receive a response from the mnode for a long time, and there are multiple mnodes, TAOSC will send a request to the next mnode. 4. TAOSC initiates an insert request to master vnode. 5. After vnode inserts the data, it gives a reply to TAOSC, indicating that the insertion is successful. If TAOSC doesn't get a response from vnode for a long time, TAOSC will treat this node as offline. In this case, if there are multiple replicas of the inserted database, TAOSC will issue an insert request to the next vnode in vgroup. 6. TAOSC notifies APP that writing is successful. -For Step 2 and 3, when TAOSC starts, it does not know the End Point of mnode, so it will directly initiate a request to the configured serving End Point of the cluster. If the dnode that receives the request does not have a mnode configured, it will inform the mnode EP list in a reply message, so that TAOSC will re-issue a request to obtain meta-data to the EP of another new mnode. +For Step 2 and 3, when TAOSC starts, it does not know the End Point of mnode, so it will directly initiate a request to the configured serving End Point of the cluster. If the dnode that receives the request does not have a mnode configured, it will reply with the mnode EP list, so that TAOSC can re-issue the meta-data request to the EP of another mnode. -For Step 4 and 5, without caching, TAOSC can't recognize the master in the virtual node group, so assumes that the first vnode is the master and sends a request to it. If this vnode is not the master, it will reply to the actual master as a new target where TAOSC shall send a request to. Once the reply of successful insertion is obtained, TAOSC will cache the information of master node. +For Step 4 and 5, without caching, TAOSC can't recognize the master in the virtual node group, so it assumes that the first vnode is the master and sends a request to it. If this vnode is not the master, it will reply with the actual master as the new target to which TAOSC shall send a request. Once a response of successful insertion is obtained, TAOSC will cache the information of master node. -The above is the process of inserting data, and the processes of querying and computing are the same. TAOSC encapsulates and hides all these complicated processes, and it is transparent to applications. +The above describes the process of inserting data. The processes of querying and computing are the same. TAOSC encapsulates and hides all these complicated processes, and it is transparent to applications. Through TAOSC caching mechanism, mnode needs to be accessed only when a table is accessed for the first time, so mnode will not become a system bottleneck. However, because schema and vgroup may change (such as load balancing), TAOSC will interact with mnode regularly to automatically update the cache.
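+
+Seen from the application side, all of Steps 2-6 are hidden behind a single library call. A minimal sketch, assuming the TDengine Python connector is installed and using illustrative host, database and table names:
+
+```python
+import taos  # TDengine Python connector; TAOSC is embedded in this library
+
+# The application only supplies the End Point of one data node; it never
+# addresses mnodes or vnodes directly.
+conn = taos.connect(host="h1.taosdata.com", user="root", password="taosdata")
+cursor = conn.cursor()
+cursor.execute("CREATE DATABASE IF NOT EXISTS power")
+cursor.execute("USE power")
+cursor.execute("CREATE TABLE IF NOT EXISTS d1001 (ts TIMESTAMP, current FLOAT)")
+# Metadata lookup, locating the master vnode, possible redirection and the
+# write confirmation (Steps 2-6 above) all happen inside this one call.
+cursor.execute("INSERT INTO d1001 VALUES (NOW, 10.3)")
+conn.close()
+```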
@@ -76,24 +78,24 @@ Through TAOSC caching mechanism, mnode needs to be accessed only when a table is ### Storage Model -The data stored by TDengine include collected time-series data, metadata related to database and tables, tag data, etc. These data are specifically divided into three parts: +The data stored by TDengine includes collected time-series data, metadata related to database and tables, tag data, etc. All of the data is specifically divided into three parts: -- Time-series data: stored in vnode and composed of data, head and last files. The amount of data is large and query amount depends on the application scenario. Out-of-order writing is allowed, but delete operation is not supported for the time being, and update operation is only allowed when database “update” parameter is set to 1. By adopting the model with **one table for each data collection point**, the data of a given time period is continuously stored, and the writing against one single table is a simple appending operation. Multiple records can be read at one time, thus ensuring the insert and query operation of a single data collection point with the best performance. -- Tag data: meta files stored in vnode. Four standard operations of create, read, update and delete are supported. The amount of data is not large. If there are N tables, there are N records, so all can be stored in memory. To make tag filtering efficient, TDengine supports multi-core and multi-threaded concurrent queries. As long as the computing resources are sufficient, even in face of millions of tables, the tag filtering results will return in milliseconds. -- Metadata: stored in mnode, including system node, user, DB, Table Schema and other information. Four standard operations of create, delete, update and read are supported. The amount of these data are not large and can be stored in memory, moreover, the query amount is not large because of the client cache. Therefore, TDengine uses centralized storage management, however, there will be no performance bottleneck. +- Time-series data: stored in vnode and composed of data, head and last files. The amount of data is large and query amount depends on the application scenario. Out-of-order writing is allowed, but delete operation is not supported for the time being, and update operation is only allowed when database “update” parameter is set to 1. By adopting the model with **one table for each data collection point**, the data of a given time period is continuously stored, and the writing against one single table is a simple appending operation. Multiple records can be read at one time, thus ensuring the best performance for both insert and query operations of a single data collection point. +- Tag data: meta files stored in vnode. Four standard operations of create, read, update and delete are supported. The amount of data is not large. If there are N tables, there are N records, so all can be stored in memory. To make tag filtering efficient, TDengine supports multi-core and multi-threaded concurrent queries. As long as the computing resources are sufficient, even with millions of tables, the tag filtering results will return in milliseconds. +- Metadata: stored in mnode and includes system node, user, DB, table schema and other information. Four standard operations of create, delete, update and read are supported. The amount of this data is not large and can be stored in memory. Moreover, the number of queries is not large because of client cache. 
Even though TDengine uses centralized storage management, because of the architecture, there is no performance bottleneck. -Compared with the typical NoSQL storage model, TDengine stores tag data and time-series data completely separately, which has two major advantages: +Compared with the typical NoSQL storage model, TDengine stores tag data and time-series data completely separately. This has two major advantages: -- Reduce the redundancy of tag data storage significantly: general NoSQL database or time-series database adopts K-V storage, in which Key includes a timestamp, a device ID and various tags. Each record carries these duplicated tags, so storage space is wasted. Moreover, if the application needs to add, modify or delete tags on historical data, it has to traverse the data and rewrite them again, which is extremely expensive to operate. -- Aggregate data efficiently between multiple tables: when aggregating data between multiple tables, it first finds out the tables which satisfy the filtering conditions, and then find out the corresponding data blocks of these tables to greatly reduce the data sets to be scanned, thus greatly improving the aggregation efficiency. Moreover, tag data is managed and maintained in a full-memory structure, and tag data queries in tens of millions can return in milliseconds. +- Reduces the redundancy of tag data storage significantly. General NoSQL database or time-series database adopts K-V (key-value) storage, in which the key includes a timestamp, a device ID and various tags. Each record carries these duplicated tags, so storage space is wasted. Moreover, if the application needs to add, modify or delete tags on historical data, it has to traverse the data and rewrite them again, which is an extremely expensive operation. +- Aggregates data efficiently between multiple tables: when aggregating data between multiple tables, it first finds the tables which satisfy the filtering conditions, and then finds the corresponding data blocks of these tables. This greatly reduces the data sets to be scanned, which in turn improves the aggregation efficiency. Moreover, tag data is managed and maintained in a full-memory structure, and tag data queries in tens of millions can return in milliseconds. ### Data Sharding -For large-scale data management, to achieve scale-out, it is generally necessary to adopt the Partitioning or Sharding strategy. TDengine implements data sharding via vnode, and time-series data partitioning via one data file for a time range. +For large-scale data management, to achieve scale-out, it is generally necessary to adopt a Partitioning or Sharding strategy. TDengine implements data sharding via vnode, and time-series data partitioning via one data file for a time range. VNode (Virtual Data Node) is responsible for providing writing, query and computing functions for collected time-series data. To facilitate load balancing, data recovery and support heterogeneous environments, TDengine splits a data node into multiple vnodes according to its computing and storage resources. The management of these vnodes is done automatically by TDengine and is completely transparent to the application.
-For a single data collection point, regardless of the amount of data, a vnode (or vnode group, if the number of replicas is greater than 1) has enough computing resource and storage resource to process (if a 16-byte record is generated per second, the original data generated in one year will be less than 0.5 G), so TDengine stores all the data of a table (a data collection point) in one vnode instead of distributing the data to two or more dnodes. Moreover, a vnode can store data from multiple data collection points (tables), and the upper limit of the tables’ quantity for a vnode is one million. By design, all tables in a vnode belong to the same DB. On a data node, unless specially configured, the number of vnodes owned by a DB will not exceed the number of system cores. +For a single data collection point, regardless of the amount of data, a vnode (or vnode group, if the number of replicas is greater than 1) has enough computing and storage resources to process the data (if a 16-byte record is generated per second, the original data generated in one year will be less than 0.5 GB). So TDengine stores all the data of a table (a data collection point) in one vnode instead of distributing the data to two or more dnodes. Moreover, a vnode can store data from multiple data collection points (tables), and the upper limit of the tables’ quantity for a vnode is one million. By design, all tables in a vnode belong to the same DB. On a data node, unless specially configured, the number of vnodes owned by a DB will not exceed the number of system cores. When creating a DB, the system does not allocate resources immediately. However, when creating a table, the system will check if there is an allocated vnode with free tablespace. If so, the table will be created in the vacant vnode immediately. If not, the system will create a new vnode on a dnode from the cluster according to the current workload, and then create the table on it. If there are multiple replicas of a DB, the system does not create only one vnode, but a vgroup (virtual data node group). The system has no limit on the number of vnodes, which is just limited by the computing and storage resources of physical nodes. @@ -101,43 +103,43 @@ The meta data of each table (including schema, tags, etc.) is also stored in vno ### Data Partitioning -In addition to vnode sharding, TDengine partitions the time-series data by time range. Each data file contains only one time range of time-series data, and the length of the time range is determined by DB's configuration parameter `“days”`. This method of partitioning by time rang is also convenient to efficiently implement the data retention policy. As long as the data file exceeds the specified number of days (system configuration parameter `“keep”`), it will be automatically deleted. Moreover, different time ranges can be stored in different paths and storage media, so as to facilitate the tiered-storage. Cold/hot data can be stored in different storage media to reduce the storage cost. +In addition to vnode sharding, TDengine partitions the time-series data by time range. Each data file contains only one time range of time-series data, and the length of the time range is determined by the database configuration parameter `“days”`. This method of partitioning by time range is also convenient to efficiently implement data retention policies. As long as the data file exceeds the specified number of days (system configuration parameter `“keep”`), it will be automatically deleted.
Moreover, different time ranges can be stored in different paths and storage media, so as to facilitate tiered-storage. Cold/hot data can be stored in different storage media to significantly reduce storage costs. In general, **TDengine splits big data by vnode and time range in two dimensions** to manage the data efficiently with horizontal scalability. ### Load Balancing -Each dnode regularly reports its status (including hard disk space, memory size, CPU, network, number of virtual nodes, etc.) to the mnode (virtual management node), so mnode knows the status of the entire cluster. Based on the overall status, when the mnode finds a dnode is overloaded, it will migrate one or more vnodes to other dnodes. During the process, TDengine services keep running and the data insertion, query and computing operations are not affected. +Each dnode regularly reports its status (including hard disk space, memory size, CPU, network, number of virtual nodes, etc.) to the mnode (virtual management node) so that the mnode knows the status of the entire cluster. Based on the overall status, when the mnode finds a dnode is overloaded, it will migrate one or more vnodes to other dnodes. During the process, TDengine services keep running and the data insertion, query and computing operations are not affected. -If the mnode has not received the dnode status for a period of time, the dnode will be treated as offline. When offline lasts a certain period of time (configured by parameter `“offlineThreshold”`), the dnode will be forcibly removed from the cluster by mnode. If the number of replicas of vnodes on this dnode is greater than one, the system will automatically create new replicas on other dnodes to ensure the replica number. If there are other mnodes on this dnode and the number of mnodes replicas is greater than one, the system will automatically create new mnodes on other dnodes to ensure the replica number. +If the mnode has not received the dnode status for a period of time, the dnode will be treated as offline. If the dnode stays offline beyond the time configured by parameter `“offlineThreshold”`, the dnode will be forcibly removed from the cluster by mnode. If the number of replicas of vnodes on this dnode is greater than one, the system will automatically create new replicas on other dnodes to ensure the replica number. If there are other mnodes on this dnode and the number of mnode replicas is greater than one, the system will automatically create new mnodes on other dnodes to ensure the replica number. -When new data nodes are added to the cluster, with new computing and storage resources are added, the system will automatically start the load balancing process. +When new data nodes are added to the cluster, with new computing and storage resources, the system will automatically start the load balancing process. The load balancing process does not require any manual intervention, and it is transparent to the application. **Note: load balancing is controlled by parameter “balance”, which turns automatic load balancing on or off.** ## Data Writing and Replication Process -If a database has N replicas, thus a virtual node group has N virtual nodes, but only one as Master and all others are slaves. When the application writes a new record to system, only the Master vnode can accept the writing request. If a slave vnode receives a writing request, the system will notifies TAOSC to redirect. +If a database has N replicas, a virtual node group has N virtual nodes.
But only one is the Master and all others are slaves. When the application writes a new record to the system, only the Master vnode can accept the writing request. If a slave vnode receives a writing request, the system will notify TAOSC to redirect. ### Master vnode Writing Process Master vnode uses a writing process as follows: -![TDengine Master Writing Process](write_master.png) +![TDengine Database Master Writing Process](write_master.webp)
Figure 3: TDengine Master writing process
1. Master vnode receives the application data insertion request, verifies it, and moves to the next step; 2. If the system configuration parameter `“walLevel”` is greater than 0, vnode will write the original request packet into database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will write the WAL data to disk immediately to ensure that even if the system goes down, all data can be recovered from the database log file; 3. If there are multiple replicas, vnode will forward data packet to slave vnodes in the same virtual node group, and the forwarded packet has a version number with data; 4. Write into memory and add the record to “skip list”; -5. Master vnode returns a confirmation message to the application, indicating a successful writing. +5. Master vnode returns a confirmation message to the application, indicating a successful write. 6. If any of Step 2, 3 or 4 fails, the error will be returned directly to the application. ### Slave vnode Writing Process For a slave vnode, the write process is as follows: -![TDengine Slave Writing Process](write_slave.png) +![TDengine Database Slave Writing Process](write_slave.webp)
Figure 4: TDengine Slave Writing Process
1. Slave vnode receives a data insertion request forwarded by Master vnode; @@ -146,19 +148,19 @@ For a slave vnode, the write process as follows: Compared with Master vnode, slave vnode has no forwarding or reply confirmation step, which means two steps fewer. But writing into memory and WAL is exactly the same. -### Remote Disaster Recovery and IDC Migration +### Remote Disaster Recovery and IDC (Internet Data Center) Migration -As above Master and Slave processes discussed, TDengine adopts asynchronous replication for data synchronization. This method can greatly improve the writing performance, with no obvious impact from network delay. By configuring IDC and rack number for each physical node, it can be ensured that for a virtual node group, virtual nodes are composed of physical nodes from different IDC and different racks, thus implementing remote disaster recovery without other tools. +As discussed above, TDengine writes using Master and Slave processes. TDengine adopts asynchronous replication for data synchronization. This method can greatly improve write performance, with no obvious impact from network delay. By configuring IDC and rack number for each physical node, it can be ensured that for a virtual node group, virtual nodes are composed of physical nodes from different IDC and different racks, thus implementing remote disaster recovery without other tools. -On the other hand, TDengine supports dynamic modification of the replicas number. Once the number of replicas increases, the newly added virtual nodes will immediately enter the data synchronization process. After synchronization completed, added virtual nodes can provide services. In the synchronization process, master and other synchronized virtual nodes keep serving. With this feature, TDengine can provide IDC migration without service interruption. It is only necessary to add new physical nodes to the existing IDC cluster, and then remove old physical nodes after the data synchronization is completed. +On the other hand, TDengine supports dynamic modification of the replica number. Once the number of replicas increases, the newly added virtual nodes will immediately enter the data synchronization process. After synchronization is complete, added virtual nodes can provide services. In the synchronization process, master and other synchronized virtual nodes keep serving. With this feature, TDengine can provide IDC migration without service interruption. It is only necessary to add new physical nodes to the existing IDC cluster, and then remove old physical nodes after the data synchronization is completed. -However, the asynchronous replication has a tiny time window where data can be lost. The specific scenario is as follows: +However, the asynchronous replication has a very low probability scenario where data may be lost. The specific scenario is as follows: -1. Master vnode has finished its 5-step operations, confirmed the success of writing to APP, and then went down; +1. Master vnode finishes its 5-step operations, confirms the success of writing to the APP, and then goes down; 2. Slave vnode receives the write request, but processing fails before writing to the log in Step 2; 3. Slave vnode will become the new master, thus losing one record. -In theory, for asynchronous replication, there is no guarantee to prevent data loss. However, this window is extremely small, only if mater and slave fail at the same time, and just confirm the successful write to the application before.
+In theory, for asynchronous replication, there is no guarantee to prevent data loss. However, this is an extremely low probability scenario as described above. Note: Remote disaster recovery and no-downtime IDC migration are only supported by Enterprise Edition. **Hint: This function is not available yet** @@ -171,43 +173,43 @@ When a vnode starts, the roles (master, slave) are uncertain, and the data is in 1. If there’s only one replica, it’s always master 2. When all replicas are online, the one with the latest version is master 3. If over half of the virtual nodes are online and one of them is a slave, that slave will automatically become master -4. For 2 and 3, if multiple virtual nodes meet the requirement, the first vnode in virtual node group list will be selected as master +4. For 2 and 3, if multiple virtual nodes meet the requirement, the first vnode in virtual node group list will be selected as master. ### Synchronous Replication For scenarios with strong data consistency requirements, asynchronous data replication is not applicable, because there is a small probability of data loss. So, TDengine provides a synchronous replication mechanism for users. When creating a database, in addition to specifying the number of replicas, the user also needs to specify a new parameter “quorum”. If quorum is greater than one, it means that every time the Master forwards a message to the replica, it needs to wait for “quorum-1” reply confirmations before informing the application that data has been successfully written to the slave. If “quorum-1” reply confirmations are not received within a certain period of time, the master vnode will return an error to the application. -With synchronous replication, performance of system will decrease and latency will increase. Because metadata needs strong consistent, the default for data synchronization between mnodes is synchronous replication. +With synchronous replication, the performance of the system will decrease and latency will increase. Because metadata needs strong consistency, the default for data synchronization between mnodes is synchronous replication. ## Caching and Persistence ### Caching -TDengine adopts a time-driven cache management strategy (First-In-First-Out, FIFO), also known as a Write-driven Cache Management Mechanism. This strategy is different from the read-driven data caching mode (Least-Recent-Used, LRU), which directly put the most recently written data in the system buffer. When the buffer reaches a threshold, the earliest data are written to disk in batches. Generally speaking, for the use of IoT data, users are most concerned about the newly generated data, that is, the current status. TDengine takes full advantage of this feature to put the most recently arrived (current state) data in the buffer. +TDengine adopts a time-driven cache management strategy (First-In-First-Out, FIFO), also known as a Write-driven Cache Management Mechanism. This strategy is different from the read-driven data caching mode (Least-Recent-Used, LRU), which directly puts the most recently written data in the system buffer. When the buffer reaches a threshold, the earliest data are written to disk in batches. Generally speaking, for the use of IoT data, users are most concerned about the most recently generated data, that is, the current status. TDengine takes full advantage of this feature to put the most recently arrived (current state) data in the buffer. -TDengine provides millisecond-level data collecting capability to users through query functions.
Putting the recently arrived data directly in the buffer can respond to users' analysis query for the latest piece or batch of data more quickly, and provide faster database query response capability as a whole. In this sense, **TDengine can be used as a data cache by setting appropriate configuration parameters without deploying Redis or other additional cache systems**, which can effectively simplify the system architecture and reduce the operation costs. It should be noted that after the TDengine is restarted, the buffer of the system will be emptied, the previously cached data will be written to disk in batches, and the previously cached data will not be reloaded into the buffer as so in a proprietary key-value cache system. +TDengine provides millisecond-level data collecting capability to users through query functions. Putting the recently arrived data directly in the buffer can respond to users' analysis query for the latest piece or batch of data more quickly, and provide faster database query response capability as a whole. In this sense, **TDengine can be used as a data cache by setting appropriate configuration parameters without deploying Redis or other additional cache systems**. This can effectively simplify the system architecture and reduce operational costs. It should be noted that after TDengine is restarted, the buffer of the system will be emptied, the previously cached data will be written to disk in batches, and the previously cached data will not be reloaded into the buffer. In this sense, TDengine's cache differs from proprietary key-value cache systems. Each vnode has its own independent memory, and it is composed of multiple memory blocks of fixed size, and different vnodes are completely isolated. When writing data, similar to the writing of logs, data is sequentially added to memory, but each vnode maintains its own skip list for quick search. When more than one third of the memory block are used, the disk writing operation will start, and the subsequent writing operation is carried out in a new memory block. By this design, one third of the memory blocks in a vnode keep the latest data, so as to achieve the purpose of caching and quick search. The number of memory blocks of a vnode is determined by the configuration parameter “blocks”, and the size of memory blocks is determined by the configuration parameter “cache”. ### Persistent Storage -TDengine uses a data-driven method to write the data from buffer into hard disk for persistent storage. When the cached data in vnode reaches a certain volume, TDengine will also pull up the disk-writing thread to write the cached data into persistent storage in order not to block subsequent data writing. TDengine will open a new database log file when the data is written, and delete the old database log file after written successfully to avoid unlimited log growth. +TDengine uses a data-driven method to write the data from buffer into hard disk for persistent storage. When the cached data in vnode reaches a certain volume, TDengine will pull up the disk-writing thread to write the cached data into persistent storage so that subsequent data writing is not blocked. TDengine will open a new database log file when the data is written, and delete the old database log file after successful persistence, to avoid unlimited log growth.
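The per-database parameters discussed so far can be set when a database is created. Below is a minimal sketch, assuming TDengine 2.x `CREATE DATABASE` options: `REPLICA` and `QUORUM` correspond to the replication discussion above, while `CACHE` (memory block size in MB) and `BLOCKS` (number of memory blocks per vnode) correspond to the caching parameters just described; the database name is hypothetical.

```mysql
-- A sketch, not an authoritative syntax reference: a database with
-- 3 replicas using synchronous replication (quorum 2); each vnode
-- gets 6 memory blocks of 16 MB each, roughly one third of which
-- always holds the most recently written (cached) data.
CREATE DATABASE power REPLICA 3 QUORUM 2 CACHE 16 BLOCKS 6;
```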
-To make full use of the characteristics of time-series data, TDengine splits the data stored in persistent storage by a vnode into multiple files, each file only saves data for a fixed number of days, which is determined by the system configuration parameter `“days”`. By so, for the given start and end date of a query, you can locate the data files to open immediately without any index, thus greatly speeding up reading operations. +To make full use of the characteristics of time-series data, TDengine splits the data stored in persistent storage by a vnode into multiple files, each file only saves data for a fixed number of days, which is determined by the system configuration parameter `“days”`. Thus for given start and end dates of a query, you can locate the data files to open immediately without any index. This greatly speeds up read operations. For time-series data, there is generally a retention policy, which is determined by the system configuration parameter `“keep”`. Data files exceeding this set number of days will be automatically deleted by the system to free up storage space. Given “days” and “keep” parameters, the total number of data files in a vnode is: keep/days. The total number of data files should not be too large or too small. 10 to 100 is appropriate. Based on this principle, reasonable days can be set. In the current version, parameter “keep” can be modified, but parameter “days” cannot be modified once it is set. -In each data file, the data of a table is stored by blocks. A table can have one or more data file blocks. In a file block, data is stored in columns, occupying a continuous storage space, thus greatly improving the reading speed. The size of file block is determined by the system parameter `“maxRows”` (the maximum number of records per block), and the default value is 4096. This value should not be too large or too small. If it is too large, the data locating in search will cost longer; if too small, the index of data block is too large, and the compression efficiency will be low with slower reading speed. +In each data file, the data of a table is stored in blocks. A table can have one or more data file blocks. In a file block, data is stored in columns, occupying a continuous storage space, thus greatly improving the reading speed. The size of file block is determined by the system parameter `“maxRows”` (the maximum number of records per block), and the default value is 4096. This value should not be too large or too small. If it is too large, data location for queries will take a longer time. If it is too small, the index of data block is too large, and the compression efficiency will be low with slower reading speed. -Each data file (with a .data postfix) has a corresponding index file (with a .head postfix). The index file has summary information of a data block for each table, recording the offset of each data block in the data file, start and end time of data and other information, so as to lead system quickly locate the data to be found. Each data file also has a corresponding last file (with a .last postfix), which is designed to prevent data block fragmentation when written in disk. If the number of written records from a table does not reach the system configuration parameter `“minRows”` (minimum number of records per block), it will be stored in the last file first. When write to disk next time, the newly written records will be merged with the records in last file and then written into data file.
+Each data file (with a .data postfix) has a corresponding index file (with a .head postfix). The index file has summary information of a data block for each table, recording the offset of each data block in the data file, start and end time of data and other information which allows the system to locate the data to be found very quickly. Each data file also has a corresponding last file (with a .last postfix), which is designed to prevent data block fragmentation when written in disk. If the number of written records from a table does not reach the system configuration parameter `“minRows”` (minimum number of records per block), it will be stored in the last file first. At the next write operation to the disk, the newly written records will be merged with the records in last file and then written into data file. -When data is written to disk, it is decided whether to compress the data according to system configuration parameter `“comp”`. TDengine provides three compression options: no compression, one-stage compression and two-stage compression, corresponding to comp values of 0, 1 and 2 respectively. One-stage compression is carried out according to the type of data. Compression algorithms include delta-delta coding, simple 8B method, zig-zag coding, LZ4 and other algorithms. Two-stage compression is based on one-stage compression and compressed by general compression algorithm, which has higher compression ratio. +When data is written to disk, the system decides whether to compress the data based on the system configuration parameter `“comp”`. TDengine provides three compression options: no compression, one-stage compression and two-stage compression, corresponding to comp values of 0, 1 and 2 respectively. One-stage compression is carried out according to the type of data. Compression algorithms include delta-delta coding, simple 8B method, zig-zag coding, LZ4 and other algorithms. Two-stage compression is based on one-stage compression and compressed by general compression algorithm, which has higher compression ratio. ### Tiered Storage -By default, TDengine saves all data in /var/lib/taos directory, and the data files of each vnode are saved in a different directory under this directory. In order to expand the storage space, minimize the bottleneck of file reading and improve the data throughput rate, TDengine can configure the system parameter “dataDir” to allow multiple mounted hard disks to be used by system at the same time. In addition, TDengine also provides the function of tiered data storage, i.e. storage on different storage media according to the time stamps of data files. For example, the latest data is stored on SSD, the data for more than one week is stored on local hard disk, and the data for more than four weeks is stored on network storage device, thus reducing the storage cost and ensuring efficient data access. The movement of data on different storage media is automatically done by the system and completely transparent to applications. Tiered storage of data is also configured through the system parameter “dataDir”. +By default, TDengine saves all data in /var/lib/taos directory, and the data files of each vnode are saved in a different directory under this directory. In order to expand the storage space, minimize the bottleneck of file reading and improve the data throughput rate, TDengine can configure the system parameter “dataDir” to allow multiple mounted hard disks to be used by system at the same time.
In addition, TDengine also provides the function of tiered data storage, i.e. storage on different storage media according to the time stamps of data files. For example, the latest data is stored on SSD, the data older than a week is stored on local hard disk, and data older than four weeks is stored on network storage device. This reduces storage costs and ensures efficient data access. The movement of data on different storage media is automatically done by the system and is completely transparent to applications. Tiered storage of data is also configured through the system parameter “dataDir”. dataDir format is as follows: ``` @@ -216,7 +218,7 @@ dataDir data_path [tier_level] Where data_path is the folder path of mount point and tier_level is the media storage-tier. The higher the media storage-tier, means the older the data file. Multiple hard disks can be mounted at the same storage-tier, and data files on the same storage-tier are distributed on all hard disks within the tier. TDengine supports up to 3 tiers of storage, so tier_level values are 0, 1, and 2. When configuring dataDir, there must be only one mount path without specifying tier_level, which is called special mount disk (path). The mount path defaults to level 0 storage media and contains special file links, which cannot be removed, otherwise it will have a devastating impact on the written data. -Suppose a physical node with six mountable hard disks/mnt/disk1,/mnt/disk2, …,/mnt/disk6, where disk1 and disk2 need to be designated as level 0 storage media, disk3 and disk4 are level 1 storage media, and disk5 and disk6 are level 2 storage media. Disk1 is a special mount disk, you can configure it in/etc/taos/taos.cfg as follows: +Suppose there is a physical node with six mountable hard disks /mnt/disk1, /mnt/disk2, …, /mnt/disk6, where disk1 and disk2 need to be designated as level 0 storage media, disk3 and disk4 are level 1 storage media, and disk5 and disk6 are level 2 storage media. Disk1 is a special mount disk; you can configure it in /etc/taos/taos.cfg as follows: ``` dataDir /mnt/disk1/taos @@ -233,11 +235,11 @@ Note: Tiered Storage is only supported in Enterprise Edition ## Data Query -TDengine provides a variety of query processing functions for tables and STables. In addition to common aggregation queries, TDengine also provides window queries and statistical aggregation functions for time-series data. The query processing of TDengine needs the collaboration of client, vnode and mnode. +TDengine provides a variety of query processing functions for tables and STables. In addition to common aggregation queries, TDengine also provides window queries and statistical aggregation functions for time-series data. Query processing in TDengine needs the collaboration of client, vnode and mnode. ### Single Table Query -The parsing and verification of SQL statements are completed on the client side. SQL statements are parsed and generate an Abstract Syntax Tree (AST), which is then checksummed. Then request metadata information (table metadata) for the table specified in the query from management node (mnode). +The parsing and verification of SQL statements are completed on the client side. SQL statements are parsed and generate an Abstract Syntax Tree (AST), which is then checksummed. Then metadata information (table metadata) for the table specified in the query is requested from the management node (mnode).
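For example, the hypothetical query below would travel exactly this path: the client parses and verifies it, then fetches the metadata of table d1001 (the per-device table used in the examples later in this section) from the mnode before dispatching the request.

```mysql
-- A sketch: count the records collected by d1001 in the last 24 hours.
-- The time filter is illustrative; parsing and verification happen on
-- the client before the request reaches the vnode hosting d1001.
select count(*) from d1001 where ts > now - 24h;
```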
According to the End Point information in metadata information, the query request is serialized and sent to the data node (dnode) where the table is located. After receiving the query, the dnode identifies the virtual node (vnode) pointed to and forwards the message to the query execution queue of the vnode. The query execution thread of vnode establishes the basic query execution environment, immediately returns the query request and starts executing the query at the same time. @@ -245,9 +247,9 @@ When client obtains query result, the worker thread in query execution queue of ### Aggregation by Time Axis, Downsampling, Interpolation -The remarkable feature that time-series data is different from ordinary data is that each record has a timestamp, so aggregating data with timestamps on the time axis is an important and distinct feature from common databases. From this point of view, it is similar to the window query of stream computing engine. +Time-series data is different from ordinary data in that each record has a timestamp. So aggregating data by timestamps on the time axis is an important and distinct feature of time-series databases which is different from that of common databases. It is similar to the window query of stream computing engines. -The keyword `interval` is introduced into TDengine to split fixed length time windows on time axis, and the data are aggregated based on time windows, and the data within window range are aggregated as needed. For example: +The keyword `interval` is introduced into TDengine to split fixed length time windows on the time axis. The data is aggregated based on time windows, and the data within time window ranges is aggregated as needed. For example: ```mysql select count(*) from d1001 interval(1h); @@ -265,21 +267,21 @@ For the data collected by device D1001, the number of records per hour is counte ### Multi-table Aggregation Query -TDengine creates a separate table for each data collection point, but in practical applications, it is often necessary to aggregate data from different data collection points. In order to perform aggregation operations efficiently, TDengine introduces the concept of STable. STable is used to represent a specific type of data collection point. It is a table set containing multiple tables. The schema of each table in the set is the same, but each table has its own static tag. The tags can be multiple and be added, deleted and modified at any time. Applications can aggregate or statistically operate all or a subset of tables under a STABLE by specifying tag filters, thus greatly simplifying the development of applications. The process is shown in the following figure: +TDengine creates a separate table for each data collection point, but in practical applications, it is often necessary to aggregate data from different data collection points. In order to perform aggregation operations efficiently, TDengine introduces the concept of STable (super table). STable is used to represent a specific type of data collection point. It is a table set containing multiple tables. The schema of each table in the set is the same, but each table has its own static tag. There can be multiple tags which can be added, deleted and modified at any time. Applications can aggregate or statistically operate on all or a subset of tables under a STABLE by specifying tag filters. This greatly simplifies the development of applications. 
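For example (a sketch assuming a hypothetical super table `meters` with a tag `location` and metric columns `current` and `voltage`, none of which are defined in this document), such a tag-filtered aggregation over all matching tables can be issued in a single statement:

```mysql
-- Hypothetical STable "meters": aggregate over only those data
-- collection points (sub-tables) whose "location" tag matches the
-- filter, in 1-hour time windows.
select avg(current), max(voltage) from meters
  where location = 'beijing'
  interval(1h);
```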
The process is shown in the following figure: -![Diagram of multi-table aggregation query](multi_tables.png) +![TDengine Database Diagram of multi-table aggregation query](multi_tables.webp)
Figure 5: Diagram of multi-table aggregation query
1. Application sends a query condition to system; 2. TAOSC sends the STable name to Meta Node(management node); 3. Management node sends the vnode list owned by the STable back to TAOSC; 4. TAOSC sends the computing request together with tag filters to multiple data nodes corresponding to these vnodes; -5. Each vnode first finds out the set of tables within its own node that meet the tag filters from memory, then scans the stored time-series data, completes corresponding aggregation calculations, and returns result to TAOSC; +5. Each vnode first finds the set of tables within its own node that meet the tag filters from memory, then scans the stored time-series data, completes corresponding aggregation calculations, and returns result to TAOSC; 6. TAOSC finally aggregates the results returned by multiple data nodes and send them back to application. -Since TDengine stores tag data and time-series data separately in vnode, by filtering tag data in memory, the set of tables that need to participate in aggregation operation is first found, which greatly reduces the volume of data scanned and improves aggregation speed. At the same time, because the data is distributed in multiple vnodes/dnodes, the aggregation operation is carried out concurrently in multiple vnodes, which further improves the aggregation speed. Aggregation functions for ordinary tables and most operations are applicable to STables. The syntax is exactly the same. Please see TAOS SQL for details. +Since TDengine stores tag data and time-series data separately in vnode, by filtering tag data in memory, the set of tables that need to participate in aggregation operation is first found, which reduces the volume of data to be scanned and improves aggregation speed. At the same time, because the data is distributed in multiple vnodes/dnodes, the aggregation operation is carried out concurrently in multiple vnodes, which further improves the aggregation speed. Aggregation functions for ordinary tables and most operations are applicable to STables. The syntax is exactly the same. Please see TAOS SQL for details. ### Precomputation -In order to effectively improve the performance of query processing, based-on the unchangeable feature of IoT data, statistical information of data stored in data block is recorded in the head of data block, including max value, min value, and sum. We call it a precomputing unit. If the query processing involves all the data of a whole data block, the pre-calculated results are directly used, and no need to read the data block contents at all. Since the amount of pre-calculated data is much smaller than the actual size of data block stored on disk, for query processing with disk IO as bottleneck, the use of pre-calculated results can greatly reduce the pressure of reading IO and accelerate the query process. The precomputation mechanism is similar to the index BRIN (Block Range Index) of PostgreSQL. +In order to effectively improve the performance of query processing, based on the unchangeable nature of IoT data, statistical information of data stored in data block is recorded in the head of data block, including max value, min value, and sum. We call it a precomputing unit. If the query processing involves all the data of a whole data block, the pre-calculated results are directly used, and there is no need to read the data block contents at all.
Since the amount of pre-calculated data is much smaller than the actual size of data block stored on disk, for query processing with disk IO as bottleneck, the use of pre-calculated results can greatly reduce the pressure of reading IO and accelerate the query process. The precomputation mechanism is similar to the BRIN (Block Range Index) of PostgreSQL. diff --git a/docs-en/21-tdinternal/_category_.yml b/docs-en/21-tdinternal/_category_.yml index 9d3df5ce3eb8e8ba3e1b01eb4a2c2ef89b631665..38a7f9a7641200e47868a693f3935216a333f44d 100644 --- a/docs-en/21-tdinternal/_category_.yml +++ b/docs-en/21-tdinternal/_category_.yml @@ -1 +1 @@ -label: TDengine Inside \ No newline at end of file +label: Inside TDengine diff --git a/docs-en/21-tdinternal/dnode.png b/docs-en/21-tdinternal/dnode.png deleted file mode 100644 index cea87dcccba5d2761996e5dde998022d86487eb9..0000000000000000000000000000000000000000 Binary files a/docs-en/21-tdinternal/dnode.png and /dev/null differ diff --git a/docs-en/21-tdinternal/dnode.webp b/docs-en/21-tdinternal/dnode.webp new file mode 100644 index 0000000000000000000000000000000000000000..a56c7e4594df00a721cb48381d68ca3bc813cdc8 Binary files /dev/null and b/docs-en/21-tdinternal/dnode.webp differ diff --git a/docs-en/21-tdinternal/message.png b/docs-en/21-tdinternal/message.png deleted file mode 100644 index 715a8bd37ee9fe7e96eacce4e7ff563fedeefbee..0000000000000000000000000000000000000000 Binary files a/docs-en/21-tdinternal/message.png and /dev/null differ diff --git a/docs-en/21-tdinternal/message.webp b/docs-en/21-tdinternal/message.webp new file mode 100644 index 0000000000000000000000000000000000000000..a2a42abff3d6e932b41a3abe9feae4a5cc13c9e5 Binary files /dev/null and b/docs-en/21-tdinternal/message.webp differ diff --git a/docs-en/21-tdinternal/modules.png b/docs-en/21-tdinternal/modules.png deleted file mode 100644 index 10ae4703a6cbbf66afea325ce4c0f919f7769a07..0000000000000000000000000000000000000000 Binary files a/docs-en/21-tdinternal/modules.png and /dev/null differ diff --git a/docs-en/21-tdinternal/modules.webp b/docs-en/21-tdinternal/modules.webp new file mode 100644 index 0000000000000000000000000000000000000000..718a6abccdbe40d4a0df5e3812fe0ab943a7c523 Binary files /dev/null and b/docs-en/21-tdinternal/modules.webp differ diff --git a/docs-en/21-tdinternal/multi_tables.png b/docs-en/21-tdinternal/multi_tables.png deleted file mode 100644 index 0cefaab6a9a4cdd671c671f7c6186dea41415ff0..0000000000000000000000000000000000000000 Binary files a/docs-en/21-tdinternal/multi_tables.png and /dev/null differ diff --git a/docs-en/21-tdinternal/multi_tables.webp b/docs-en/21-tdinternal/multi_tables.webp new file mode 100644 index 0000000000000000000000000000000000000000..8f649e34a3a62d1b11b4403b2e743ff6b5e47be2 Binary files /dev/null and b/docs-en/21-tdinternal/multi_tables.webp differ diff --git a/docs-en/21-tdinternal/replica-forward.png b/docs-en/21-tdinternal/replica-forward.png deleted file mode 100644 index bf616e030b130603eceb5dccfd30b4a1dfa68ea5..0000000000000000000000000000000000000000 Binary files a/docs-en/21-tdinternal/replica-forward.png and /dev/null differ diff --git a/docs-en/21-tdinternal/replica-forward.webp b/docs-en/21-tdinternal/replica-forward.webp new file mode 100644 index 0000000000000000000000000000000000000000..512efd4eba8f23ad0f8607eaaf5525f51ecdcf0e Binary files /dev/null and b/docs-en/21-tdinternal/replica-forward.webp differ diff --git a/docs-en/21-tdinternal/replica-master.png b/docs-en/21-tdinternal/replica-master.png deleted 
file mode 100644 index cb33f1ce98661563693215d8fc73b003235c7668..0000000000000000000000000000000000000000 Binary files a/docs-en/21-tdinternal/replica-master.png and /dev/null differ diff --git a/docs-en/21-tdinternal/replica-master.webp b/docs-en/21-tdinternal/replica-master.webp new file mode 100644 index 0000000000000000000000000000000000000000..57030a11f563af2689dbcfd206183f410b121aee Binary files /dev/null and b/docs-en/21-tdinternal/replica-master.webp differ diff --git a/docs-en/21-tdinternal/replica-restore.png b/docs-en/21-tdinternal/replica-restore.png deleted file mode 100644 index 1558e5ed0108d23efdc6b5d9ea0e44a1dff45d28..0000000000000000000000000000000000000000 Binary files a/docs-en/21-tdinternal/replica-restore.png and /dev/null differ diff --git a/docs-en/21-tdinternal/replica-restore.webp b/docs-en/21-tdinternal/replica-restore.webp new file mode 100644 index 0000000000000000000000000000000000000000..f282c2d4d23f517e3ef08e906cea7e9c5edc0b2a Binary files /dev/null and b/docs-en/21-tdinternal/replica-restore.webp differ diff --git a/docs-en/21-tdinternal/structure.png b/docs-en/21-tdinternal/structure.png deleted file mode 100644 index 4fc8f47ab0a30d95b85ba1d85105726ed981e56e..0000000000000000000000000000000000000000 Binary files a/docs-en/21-tdinternal/structure.png and /dev/null differ diff --git a/docs-en/21-tdinternal/structure.webp b/docs-en/21-tdinternal/structure.webp new file mode 100644 index 0000000000000000000000000000000000000000..b77a42c074b15302b5c3ab889fb550a46dd549b3 Binary files /dev/null and b/docs-en/21-tdinternal/structure.webp differ diff --git a/docs-en/21-tdinternal/vnode.png b/docs-en/21-tdinternal/vnode.png deleted file mode 100644 index e6148d4907cf9a18bc52251f712d5c685651b7f5..0000000000000000000000000000000000000000 Binary files a/docs-en/21-tdinternal/vnode.png and /dev/null differ diff --git a/docs-en/21-tdinternal/vnode.webp b/docs-en/21-tdinternal/vnode.webp new file mode 100644 index 0000000000000000000000000000000000000000..fae3104c89c542c26790b509d12ad56661082c32 Binary files /dev/null and b/docs-en/21-tdinternal/vnode.webp differ diff --git a/docs-en/21-tdinternal/write_master.png b/docs-en/21-tdinternal/write_master.png deleted file mode 100644 index ff2dfc20bfc2ecf956a2aab1a8965a7bbcae4387..0000000000000000000000000000000000000000 Binary files a/docs-en/21-tdinternal/write_master.png and /dev/null differ diff --git a/docs-en/21-tdinternal/write_master.webp b/docs-en/21-tdinternal/write_master.webp new file mode 100644 index 0000000000000000000000000000000000000000..9624036ed3d46ed60924ead9ce5c61acee0f4652 Binary files /dev/null and b/docs-en/21-tdinternal/write_master.webp differ diff --git a/docs-en/21-tdinternal/write_slave.png b/docs-en/21-tdinternal/write_slave.png deleted file mode 100644 index cacb2cb6bcc4f4d934e979862387e1345bbac078..0000000000000000000000000000000000000000 Binary files a/docs-en/21-tdinternal/write_slave.png and /dev/null differ diff --git a/docs-en/21-tdinternal/write_slave.webp b/docs-en/21-tdinternal/write_slave.webp new file mode 100644 index 0000000000000000000000000000000000000000..7c45dec11b00e6a738de458f9e1bedacfad75a96 Binary files /dev/null and b/docs-en/21-tdinternal/write_slave.webp differ diff --git a/docs-en/25-application/01-telegraf.md b/docs-en/25-application/01-telegraf.md index 718e04ecd3dbd2a72feba3f5297d9da33a94ba83..d30a23fe1b942e1411e8b5f1320e1c54ae2b407f 100644 --- a/docs-en/25-application/01-telegraf.md +++ b/docs-en/25-application/01-telegraf.md @@ -5,18 +5,18 @@ title: Quickly Build IT 
DevOps Visualization System with TDengine + Telegraf + G ## Background -TDengine is a big data platform designed and optimized for IoT (Internet of Things), Vehicle Telematics, Industrial Internet, IT DevOps, etc. by TAOSData. Since it opened its source code in July 2019, it has won the favor of a large number of time-series data developers with its innovative data modeling design, convenient installation, easy-to-use programming interface, and powerful data writing and query performance. +TDengine is a big data platform designed and optimized for IoT (Internet of Things), Vehicle Telemetry, Industrial Internet, IT DevOps and other applications. Since it was open-sourced in July 2019, it has won the favor of a large number of time-series data developers with its innovative data modeling design, convenient installation, easy-to-use programming interface, and powerful data writing and query performance. IT DevOps metric data usually are time sensitive, for example: - System resource metrics: CPU, memory, IO, bandwidth, etc. - Software system metrics: health status, number of connections, number of requests, number of timeouts, number of errors, response time, service type, and other business-related metrics. -Current mainstream IT DevOps system usually include a data collection module, a data persistent module, and a visualization module; Telegraf and Grafana are one of the most popular data collection modules and visualization modules, respectively. The data persistent module is available in a wide range of options, with OpenTSDB or InfluxDB being the most popular. TDengine, as an emerging time-series big data platform, has the advantages of high performance, high reliability, easy management and easy maintenance. +Current mainstream IT DevOps systems usually include a data collection module, a data persistence module, and a visualization module; Telegraf and Grafana are among the most popular data collection and visualization modules, respectively. The data persistence module is available in a wide range of options, with OpenTSDB or InfluxDB being the most popular. TDengine, as an emerging time-series big data platform, has the advantages of high performance, high reliability, easy management and easy maintenance. -This article introduces how to quickly build a TDengine + Telegraf + Grafana based IT DevOps visualization system without writing even a single line of code and by simply modifying a few lines of configuration files. The architecture is as follows. +This article introduces how to quickly build a TDengine + Telegraf + Grafana based IT DevOps visualization system without writing even a single line of code and by simply modifying a few lines in configuration files. The architecture is as follows. -![IT-DevOps-Solutions-Telegraf.png](/img/IT-DevOps-Solutions-Telegraf.png) +![TDengine Database IT-DevOps-Solutions-Telegraf](./IT-DevOps-Solutions-Telegraf.webp) ## Installation steps @@ -73,11 +73,11 @@ sudo systemctl start telegraf Log in to the Grafana interface using a web browser at `IP:3000`, with the system's initial username and password being `admin/admin`. Click on the gear icon on the left and select `Plugins`, you should find the TDengine data source plugin icon. -Click on the plus icon on the left and select `Import` to get the data from `https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard- v0.1.0.json`, download the dashboard JSON file and import it. You will then see the dashboard in the following screen.
+Click on the plus icon on the left and select `Import` to get the data from `https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json`, download the dashboard JSON file and import it. You will then see the dashboard in the following screen. -![IT-DevOps-Solutions-telegraf-dashboard.png](/img/IT-DevOps-Solutions-telegraf-dashboard.png) +![TDengine Database IT-DevOps-Solutions-telegraf-dashboard](./IT-DevOps-Solutions-telegraf-dashboard.webp) ## Wrap-up -The above demonstrates how to quickly build a IT DevOps visualization system. Thanks to the new schemaless protocol parsing feature in TDengine version 2.4.0.0 and the powerful ecological software adaptation capability, users can build an efficient and easy-to-use IT DevOps visualization system in just a few minutes. +The above demonstrates how to quickly build an IT DevOps visualization system. Thanks to the new schemaless protocol parsing feature in TDengine version 2.4.0.0 and the ability to integrate easily with a large software ecosystem, users can build an efficient and easy-to-use IT DevOps visualization system in just a few minutes. Please refer to the official documentation and product implementation cases for other features. diff --git a/docs-en/25-application/02-collectd.md b/docs-en/25-application/02-collectd.md index 2ac37618fafe11e71b215313e53f89b6c302f7cb..1733ed1b1af8c9375c3773d1ca86831396499a78 100644 --- a/docs-en/25-application/02-collectd.md +++ b/docs-en/25-application/02-collectd.md @@ -5,19 +5,19 @@ title: Quickly build an IT DevOps visualization system using TDengine + collectd ## Background -TDengine is a big data platform designed and optimized for IoT (Internet of Things), Vehicle Telematics, Industrial Internet, IT DevOps, etc. by TAOSData. Since it opened its source code in July 2019, it has won the favor of a large number of time-series data developers with its innovative data modeling design, convenient installation, easy-to-use programming interface, and powerful data writing and query performance. +TDengine is a big data platform designed and optimized for IoT (Internet of Things), Vehicle Telemetry, Industrial Internet, IT DevOps and other applications. Since it was open-sourced in July 2019, it has won the favor of a large number of time-series data developers with its innovative data modeling design, convenient installation, easy-to-use programming interface, and powerful data writing and query performance. IT DevOps metric data usually are time sensitive, for example: - System resource metrics: CPU, memory, IO, bandwidth, etc. - Software system metrics: health status, number of connections, number of requests, number of timeouts, number of errors, response time, service type, and other business-related metrics. -The current mainstream IT DevOps visualization system usually contains a data collection module, a data persistent module, and a visual display module. collectd/StatsD, as an old-fashion open source data collection tool, has a wide user base. However, collectd/StatsD has limited functionality, and often needs to be combined with Telegraf, Grafana, and a time-series database to build a complete monitoring system. +The current mainstream IT DevOps visualization system usually contains a data collection module, a data persistence module, and a visual display module. collectd/StatsD, as an old-fashioned open source data collection tool, has a wide user base.
However, collectd/StatsD has limited functionality, and often needs to be combined with Telegraf, Grafana, and a time-series database to build a complete monitoring system. The new version of TDengine supports multiple data protocols and can accept data from collectd and StatsD directly, and provides Grafana dashboard for graphical display. -This article introduces how to quickly build an IT DevOps visualization system based on TDengine + collectd / StatsD + Grafana without writing even a single line of code but by simply modifying a few lines of configuration files. The architecture is shown in the following figure. +This article introduces how to quickly build an IT DevOps visualization system based on TDengine + collectd / StatsD + Grafana without writing even a single line of code but by simply modifying a few lines in configuration files. The architecture is shown in the following figure. -![IT-DevOps-Solutions-Collectd-StatsD.png](/img/IT-DevOps-Solutions-Collectd-StatsD.png) +![TDengine Database IT-DevOps-Solutions-Collectd-StatsD](./IT-DevOps-Solutions-Collectd-StatsD.webp) ## Installation Steps @@ -83,22 +83,22 @@ Click on the gear icon on the left and select `Plugins`, you should find the TDe Download the dashboard json from `https://github.com/taosdata/grafanaplugin/blob/master/examples/collectd/grafana/dashboards/collect-metrics-with-tdengine-v0.1.0.json`, click the plus icon on the left and select Import, follow the instructions to import the JSON file. After that, you can see the dashboard in the following screen. -![IT-DevOps-Solutions-collectd-dashboard.png](/img/IT-DevOps-Solutions-collectd-dashboard.png) +![TDengine Database IT-DevOps-Solutions-collectd-dashboard](./IT-DevOps-Solutions-collectd-dashboard.webp) #### import collectd dashboard Download the dashboard json file from `https://github.com/taosdata/grafanaplugin/blob/master/examples/collectd/grafana/dashboards/collect-metrics-with-tdengine-v0.1.0.json`. Click the plus icon on the left side and select `Import`, and follow the interface prompts to select the JSON file to import. After that, you can see dashboard with the following interface. -![IT-DevOps-Solutions-collectd-dashboard.png](/img/IT-DevOps-Solutions-collectd-dashboard.png) +![IT-DevOps-Solutions-collectd-dashboard](./IT-DevOps-Solutions-collectd-dashboard.webp) #### Importing the StatsD dashboard Download the dashboard json from `https://github.com/taosdata/grafanaplugin/blob/master/examples/statsd/dashboards/statsd-with-tdengine-v0.1.0.json`. Click on the plus icon on the left and select `Import`, and follow the interface prompts to import the JSON file. You will then see the dashboard in the following screen. -![IT-DevOps-Solutions-statsd-dashboard.png](/img/IT-DevOps-Solutions-statsd-dashboard.png) +![TDengine Database IT-DevOps-Solutions-statsd-dashboard](./IT-DevOps-Solutions-statsd-dashboard.webp) ## Wrap-up -TDengine, as an emerging time-series big data platform, has the advantages of high performance, high reliability, easy management and easy maintenance. Thanks to the new schemaless protocol parsing function in TDengine version 2.4.0.0 and the powerful ecological software adaptation capability, users can build an efficient and easy-to-use IT DevOps visualization system or adapt to an existing system in just a few minutes. +TDengine, as an emerging time-series big data platform, has the advantages of high performance, high reliability, easy management and easy maintenance.
Thanks to the new schemaless protocol parsing feature in TDengine version 2.4.0.0 and the ability to integrate easily with a large software ecosystem, users can build an efficient and easy-to-use IT DevOps visualization system, or adapt an existing system, in just a few minutes. For TDengine's powerful data writing and querying performance and other features, please refer to the official documentation and successful product implementation cases. diff --git a/docs-en/25-application/03-immigrate.md b/docs-en/25-application/03-immigrate.md index 81d5f512bfc8ed3cd1f5223a9ff72218023515f0..4d47aec1d76014ba63f6be91004abcc3934769f7 100644 --- a/docs-en/25-application/03-immigrate.md +++ b/docs-en/25-application/03-immigrate.md @@ -3,10 +3,9 @@ sidebar_label: OpenTSDB Migration to TDengine title: Best Practices for Migrating OpenTSDB Applications to TDengine --- -As a distributed, scalable, HBase-based distributed time-series database software, thanks to its first-mover advantage, OpenTSDB has been introduced and widely used in DevOps by people. However, using new technologies like cloud computing, microservices, and containerization technology with rapid development. Enterprise-level services are becoming more and more diverse. The architecture is becoming more complex. +As a distributed, scalable time-series database platform based on HBase, and thanks to its first-mover advantage, OpenTSDB is widely used for monitoring in DevOps. However, as new technologies like cloud computing, microservices, and containerization have developed rapidly, enterprise-level services are becoming more and more diverse and the architecture is becoming more complex. -From this situation, it increasingly plagues to use of OpenTSDB as a DevOps backend storage for monitoring by performance issues and delayed feature upgrades. The resulting increase in application deployment costs and reduced operational efficiency. -These problems are becoming increasingly severe as the system scales up. +As a result, as a DevOps backend for monitoring, OpenTSDB is plagued by performance issues and delayed feature upgrades. This has resulted in increased application deployment costs and reduced operational efficiency. These problems become increasingly severe as the system tries to scale up. To meet the fast-growing IoT big data market and technical needs, TAOSData developed an innovative big-data processing product, **TDengine**. @@ -14,14 +13,14 @@ After learning the advantages of many traditional relational databases and NoSQL Compared with OpenTSDB, TDengine has the following distinctive features. -- Performance of data writing and querying far exceeds that of OpenTSDB. -- Efficient compression mechanism for time-series data, which compresses less than 1/5 of the storage space on disk. -- The installation and deployment are straightforward. A single installation package can complete the installation and deployment and does not rely on other third-party software. The entire installation and deployment process in a few seconds; -- The built-in functions cover all of OpenTSDB's query functions. And support more time-series data query functions, scalar functions, and aggregation functions. And support advanced query functions such as multiple time-window aggregations, join query, expression operation, multiple group aggregation, user-defined sorting, and user-defined functions. Adopting SQL-like syntax rules is more straightforward and has no learning cost.
+- Data writing and querying performance far exceeds that of OpenTSDB. +- Efficient compression mechanism for time-series data, which compresses data to less than 1/5 of the original storage space on disk. +- The installation and deployment are straightforward. A single installation package can complete the installation and deployment and does not rely on other third-party software. The entire installation and deployment process takes a few seconds. +- The built-in functions cover all of OpenTSDB's query functions and TDengine supports more time-series data query functions, scalar functions, and aggregation functions. TDengine also supports advanced query functions such as multiple time-window aggregations, join query, expression operation, multiple group aggregation, user-defined sorting, and user-defined functions. With a SQL-like query language, querying is more straightforward and has no learning cost. - Supports up to 128 tags, with a total tag length of 16 KB. - In addition to the REST interface, it also provides interfaces to Java, Python, C, Rust, Go, C# and other languages. Its supports a variety of enterprise-class standard connector protocols such as JDBC. -If we migrate the applications originally running on OpenTSDB to TDengine, we will effectively reduce the compute and storage resource consumption and the number of deployed servers. And will also significantly reduce the operation and maintenance costs, making operation and maintenance management more straightforward and more accessible, and considerably reducing the total cost of ownership. Like OpenTSDB, TDengine has also been open-sourced, including the stand-alone version and the cluster version source code. So there is no need to be concerned about the vendor-lock problem. +Migrating applications originally running on OpenTSDB to TDengine effectively reduces compute and storage resource consumption and the number of deployed servers. It also significantly reduces operation and maintenance costs, makes operation and maintenance management more straightforward and more accessible, and considerably reduces the total cost of ownership. Like OpenTSDB, TDengine has also been open-sourced. Both the stand-alone version and the cluster version are open-sourced and there is no need to be concerned about the vendor lock-in problem. We will explain how to migrate OpenTSDB applications to TDengine quickly, securely, and reliably without coding, using the most typical DevOps scenarios. Subsequent chapters will go into more depth to facilitate migration for non-DevOps systems. @@ -32,9 +31,9 @@ We will explain how to migrate OpenTSDB applications to TDengine quickly, secure The following figure (Figure 1) shows the system's overall architecture for a typical DevOps application scenario. **Figure 1. Typical architecture in a DevOps scenario** -Figure 1. [IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch](/img/IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.jpg "Figure 1. Typical architecture in a DevOps scenario") +![TDengine Database IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch](./IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.webp "Figure 1. Typical architecture in a DevOps scenario") -In this application scenario, there are Agent tools deployed in the application environment to collect machine metrics, network metrics, and application metrics. Data collectors to aggregate information collected by agents, systems for persistent data storage and management, and tools for monitoring data visualization (e.g., Grafana, etc.).
+In this application scenario, there are Agent tools deployed in the application environment to collect machine metrics, network metrics, and application metrics. There are also data collectors to aggregate information collected by agents, systems for persistent data storage and management, and tools for data visualization (e.g., Grafana, etc.). The agents deployed in the application nodes are responsible for providing operational metrics from different sources to collectd/Statsd. And collectd/StatsD is accountable for pushing the aggregated data to the OpenTSDB cluster system and then visualizing the data using the visualization kanban board software, Grafana. @@ -44,15 +43,15 @@ The agents deployed in the application nodes are responsible for providing opera First of all, please install TDengine. Download the latest stable version of TDengine from the official website and install it. For help with using various installation packages, please refer to the blog ["Installation and Uninstallation of TDengine Multiple Installation Packages"](https://www.taosdata.com/blog/2019/08/09/566.html). -Note that once the installation is complete, do not start the `taosd` service immediately, but after properly configuring the parameters. +Note that once the installation is complete, do not start the `taosd` service before properly configuring the parameters. - **Adjusting the data collector configuration** TDengine version 2.4 and later version includes `taosAdapter`. taosAdapter is a stateless, rapidly elastic, and scalable component. taosAdapter supports Influxdb's Line Protocol and OpenTSDB's telnet/JSON writing protocol specification, providing rich data access capabilities, effectively saving user migration costs and reducing the difficulty of user migration. -Users can flexibly deploy taosAdapter instances according to their requirements to rapidly improve the throughput of data writes in conjunction with the needs of scenarios and provide guarantees for data writes in different application scenarios. +Users can flexibly deploy taosAdapter instances, based on their requirements, to improve data writing throughput and provide guarantees for data writes in different application scenarios. -Through taosAdapter, users can directly push the data collected by `collectd` or `StatsD` to TDengine to achieve seamless migration of application scenarios, which is very easy and convenient. taosAdapter also supports Telegraf, Icinga, TCollector, and node_exporter data. For more details, please refer to [taosAdapter](/reference/taosadapter/). +Through taosAdapter, users can directly write the data collected by `collectd` or `StatsD` to TDengine to achieve easy, convenient and seamless migration in application scenarios. taosAdapter also supports Telegraf, Icinga, TCollector, and node_exporter data. For more details, please refer to [taosAdapter](/reference/taosadapter/). If using collectd, modify the configuration file in its default location `/etc/collectd/collectd.conf` to point to the IP address and port of the node where to deploy taosAdapter. For example, assuming the taosAdapter IP address is 192.168.1.130 and port 6046, configure it as follows. @@ -66,29 +65,29 @@ LoadPlugin write_tsdb ``` -You can use collectd and push the data to taosAdapter utilizing the push to OpenTSDB plugin. taosAdapter will call the API to write the data to TDengine, thus completing the writing of the data. If you are using StatsD, adjust the profile information accordingly. 
+You can use collectd and push the data to taosAdapter utilizing the write_tsdb plugin. taosAdapter will call the API to write the data to TDengine. If you are using StatsD, adjust the profile information accordingly. - **Tuning the Dashboard system** -After writing the data to TDengine properly, you can adapt Grafana to visualize the data written to TDengine. To obtain and use the Grafana plugin provided by TDengine, please refer to [Links to other tools](/third-party/grafana). +After writing the data to TDengine, you can configure Grafana to visualize the data written to TDengine. To obtain and use the Grafana plugin provided by TDengine, please refer to [Links to other tools](/third-party/grafana). TDengine provides two sets of Dashboard templates by default, and users only need to import the templates from the Grafana directory into Grafana to activate their use. **Importing Grafana Templates** Figure 2. -! [](/img/IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.jpg "Figure 2. Importing a Grafana Template") +![TDengine Database IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard](./IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.webp "Figure 2. Importing a Grafana Template") -After the above steps, you completed the migration to replace OpenTSDB with TDengine. You can see that the whole process is straightforward, there is no need to write any code, and only some configuration files need to be adjusted to meet the migration work. +With the above steps completed, you have finished replacing OpenTSDB with TDengine. You can see that the whole process is straightforward, there is no need to write any code, and only some configuration files need to be changed. ### 3. Post-migration architecture -After completing the migration, the figure below (Figure 3) shows the system's overall architecture. The whole process of the acquisition side, the data writing, and the monitoring and presentation side are all kept stable, except for a few configuration adjustments, which do not involve any critical changes or alterations. OpenTSDB to TDengine migration action, using TDengine more powerful processing power and query performance. +After completing the migration, the figure below (Figure 3) shows the system's overall architecture. The whole process of the acquisition side, the data writing, and the monitoring and presentation side are all kept stable. There are a few configuration adjustments, which do not involve any critical changes or alterations. Migrating from OpenTSDB to TDengine provides more powerful processing and query performance. -In most DevOps scenarios, if you have a small OpenTSDB cluster (3 or fewer nodes) for providing the storage layer of DevOps and rely on OpenTSDB to give a data persistence layer and query capabilities, you can safely replace OpenTSDB with TDengine. TDengine will save more compute and storage resources. With the same compute resource allocation, a single TDengine can meet the service capacity provided by 3 to 5 OpenTSDB nodes. If the scale is more prominent, then TDengine clustering is required. - -Suppose your application is particularly complex, or the application domain is not a DevOps scenario. You can continue reading subsequent chapters for a more comprehensive and in-depth look at the advanced topics of migrating an OpenTSDB application to TDengine.
+In most DevOps scenarios, if you have a small OpenTSDB cluster (3 or fewer nodes) which provides the storage and data persistence layer in addition to query capability, you can safely replace OpenTSDB with TDengine. TDengine will save compute and storage resources. With the same compute resource allocation, a single TDengine can meet the service capacity provided by 3 to 5 OpenTSDB nodes. TDengine clustering may be required depending on the scale of the application. **Figure 3. System architecture after migration** -! [IT-DevOps-Solutions-Immigrate-TDengine-Arch](/img/IT-DevOps-Solutions-Immigrate-TDengine-Arch.jpg "Figure 3. System architecture after migration completion") +![TDengine Database IT-DevOps-Solutions-Immigrate-TDengine-Arch](./IT-DevOps-Solutions-Immigrate-TDengine-Arch.webp "Figure 3. System architecture after migration completion") + +The following chapters provide a more comprehensive and in-depth look at the advanced topics of migrating an OpenTSDB application to TDengine. This will be useful if your application is particularly complex and is not a DevOps application. ## Migration evaluation and strategy for other scenarios @@ -96,26 +95,25 @@ This chapter describes the differences between OpenTSDB and TDengine at the system functionality level. After reading this chapter, you can fully evaluate whether you can migrate some complex OpenTSDB-based applications to TDengine, and what you should pay attention to after migration. -TDengine currently only supports Grafana for visual kanban rendering, so if your application uses front-end kanban boards other than Grafana (e.g., [TSDash](https://github.com/facebook/tsdash), [Status Wolf](https://github) .com/box/StatusWolf), etc.). You cannot directly migrate those front-end kanbans to TDengine, and the front-end kanban will need to be ported to Grafana to work correctly. +TDengine currently only supports Grafana for visual kanban rendering, so if your application uses front-end kanban boards other than Grafana (e.g., [TSDash](https://github.com/facebook/tsdash), [Status Wolf](https://github.com/box/StatusWolf), etc.) you cannot directly migrate those front-end kanbans to TDengine. The front-end kanban will need to be ported to Grafana to work correctly. -TDengine version 2.3.0.x only supports collectd and StatsD as data collection aggregation software but will provide more data collection aggregation software in the future. If you use other data aggregators on the collection side, your application needs to be ported to these two data aggregation systems to write data correctly. +TDengine version 2.3.0.x only supports collectd and StatsD as data collection and aggregation software but future versions will provide support for more data collection and aggregation software. If you use other data aggregators on the collection side, your application needs to be ported to these two data aggregation systems to write data correctly. In addition to the two data aggregator software protocols mentioned above, TDengine also supports writing data directly via InfluxDB's line protocol and OpenTSDB's data writing protocol, JSON format. You can rewrite the logic on the data push side to write data using the line protocols supported by TDengine. -In addition, if your application uses the following features of OpenTSDB, you need to understand the following considerations before migrating your application to TDengine.
+In addition, if your application uses the following features of OpenTSDB, you need to take into account the following considerations before migrating your application to TDengine. 1. `/api/stats`: If your application uses this feature to monitor the service status of OpenTSDB, and you have built the relevant logic to link the processing in your application, then this part of the status reading and fetching logic needs to be re-adapted to TDengine. TDengine provides a new mechanism for handling cluster state monitoring to meet the monitoring and maintenance needs of your application. -2. `/api/tree`: If you rely on this feature of OpenTSDB for the hierarchical organization and maintenance of timelines, you cannot migrate it directly to TDengine, which uses a database -> super table -> sub-table hierarchy to organize and maintain timelines, with all timelines belonging to the same super table in the same system hierarchy, but it is possible to simulate a logical multi-level structure of the application through the unique construction of different tag values. -3. `Rollup And PreAggregates`: The use of Rollup and PreAggregates requires the application to decide where to access the Rollup results and, in some scenarios, to access the actual results. The opacity of this structure makes the application processing logic extraordinarily complex and not portable at all. We think this strategy is a compromise when the time-series database does not. -TDengine does not support automatic downsampling of multiple timelines and preaggregation (for a range of periods) for the time being. Still, thanks to its high-performance query processing logic can provide very high-performance query responses without relying on Rollup and preaggregation (for a range of periods), making your application query processing logic much more straightforward. -The logic is much simpler. -4. `Rate`: TDengine provides two functions to calculate the rate of change of values, namely `Derivative` (the result is consistent with the Derivative behavior of InfluxDB) and `IRate` (the result is compatible with the IRate function in Prometheus). However, the results of these two functions are slightly different from Rate, but the functions are more powerful overall. In addition, TDengine supports all the calculation functions provided by OpenTSDB, and TDengine's query functions are much more potent than those supported by OpenTSDB, which can significantly simplify the processing logic of your application. +2. `/api/tree`: If you rely on this feature of OpenTSDB for the hierarchical organization and maintenance of timelines, you cannot migrate it directly to TDengine, which uses a database -> super table -> sub-table hierarchy to organize and maintain timelines, with all timelines belonging to the same super table in the same system hierarchy. But it is possible to simulate a logical multi-level structure of the application through the unique construction of different tag values. +3. `Rollup And PreAggregates`: The use of Rollup and PreAggregates requires the application to decide where to access the Rollup results and, in some scenarios, to access the actual results. The opacity of this structure makes the application processing logic extraordinarily complex and not portable at all. 
+While TDengine does not currently support automatic downsampling of multiple timelines and preaggregation (for a range of periods), thanks to its high-performance query processing logic, it can provide very high-performance query responses without relying on Rollup and preaggregation (for a range of periods). This makes your application query processing logic much more straightforward. +4. `Rate`: TDengine provides two functions to calculate the rate of change of values, namely `Derivative` (the result is consistent with the Derivative behavior of InfluxDB) and `IRate` (the result is compatible with the IRate function in Prometheus). However, the results of these two functions are slightly different from those of Rate, but the TDengine functions are more powerful overall. In addition, TDengine supports all the calculation functions provided by OpenTSDB. TDengine's query functions are much more powerful than those supported by OpenTSDB, which can significantly simplify the processing logic of your application. -Through the above introduction, I believe you should be able to understand the changes brought about by the migration of OpenTSDB to TDengine. And this information will also help you correctly determine whether you would migrate your application to TDengine to experience the powerful and convenient time-series data processing capability provided by TDengine. +With the above introduction, we believe you should be able to understand the changes brought about by the migration of OpenTSDB to TDengine. This information will also help you correctly determine whether you should migrate your application to TDengine to experience the powerful and convenient time-series data processing capability provided by TDengine. ### 2. Migration strategy suggestion -First, the OpenTSDB-based system migration involves data schema design, system scale estimation, and data write end transformation, data streaming, and application adaptation; after that, the two systems will run in parallel for a while and then migrate the historical data to TDengine. Of course, if your application has some functions that strongly depend on the above OpenTSDB features and you do not want to stop using them, you can migrate the historical data to TDengine. -You can consider keeping the original OpenTSDB system running while starting TDengine to provide the primary services. +OpenTSDB-based system migration involves data schema design, system scale estimation, data write transformation, data streaming, and application changes. The two systems should run in parallel for a while, after which the historical data can be migrated to TDengine. +If your application has some functions that strongly depend on the above OpenTSDB features and you do not want to stop using them, you can also consider keeping the original OpenTSDB system running while using TDengine to provide the primary services. ## Data model design @@ -129,16 +127,19 @@ Let us now assume a DevOps scenario where we use collectd to collect the underly | 2 | swap | value | double | host | swap_type | swap_type_instance | source | n/a | | 3 | disk | value | double | host | disk_point | disk_instance | disk_type | source | -TDengine requires the data stored to have a data schema, i.e., you need to create a super table and specify the schema of the super table before writing the data.
For data schema creation, you have two ways to do this: 1) Take advantage of TDengine's native data writing support for OpenTSDB by calling the TDengine API to write (text line or JSON format) -and automate the creation of single-value models. This approach does not require significant adjustments to the data writing application, nor does it require converting the written data format. +TDengine requires the data stored to have a data schema, i.e., you need to create a super table and specify the schema of the super table before writing the data. For data schema creation, you have two ways to do this: +1) Take advantage of TDengine's native data writing support for OpenTSDB by calling the TDengine API to write (text line or JSON format) and automate the creation of single-value models. This approach does not require significant adjustments to the data writing application, nor does it require converting the written data format. At the C level, TDengine provides the `taos_schemaless_insert()` function to write data in OpenTSDB format directly (in earlier versions this function was named `taos_insert_lines()`). Please refer to the sample code `schemaless.c` in the installation package directory for reference. -(2) based on a complete understanding of TDengine's data model, to establish the mapping relationship between OpenTSDB and TDengine's data model adjustment manually. Considering that OpenTSDB is a single-value mapping model, recommended using the single-value model in TDengine. TDengine can support both multi-value and single-value models. +2) Based on a thorough understanding of TDengine's data model, establish a mapping between OpenTSDB and TDengine's data model. Considering that OpenTSDB is a single-value mapping model, we recommend using the single-value model in TDengine for simplicity. But keep in mind that TDengine supports both multi-value and single-value models. - **Single-valued model**. -The steps are as follows: use the name of the metrics as the name of the TDengine super table, which build with two basic data columns - timestamp and value, and the label of the super table is equivalent to the label information of the metrics, and the number of labels is equal to the number of labels of the metrics. The names of sub-tables are named with fixed rules: `metric + '_' + tags1_value + '_' + tag2_value + '_' + tag3_value ...` as the sub-table name. +The steps are as follows: +- Use the name of the metrics as the name of the TDengine super table +- Build the super table with two basic data columns: timestamp and value. The label of the super table is equivalent to the label information of the metrics, and the number of labels is equal to the number of labels of the metrics. +- Name the sub-tables using a fixed rule: `metric + '_' + tags1_value + '_' + tag2_value + '_' + tag3_value ...` as the sub-table name. Create 3 super tables in TDengine. @@ -158,13 +159,13 @@ The final system will have about 340 sub-tables and three super-tables. Note tha - **Multi-value model** -Suppose you want to take advantage of TDengine's multi-value modeling capabilities. In that case, you need first to meet the requirements that different collection quantities have the same collection frequency and can reach the **data write side simultaneously via a message queue**, thus ensuring writing multiple metrics at once using SQL statements. The metric's name is used as the name of the super table to create a multi-column model of data that has the same collection frequency and can arrive simultaneously.
The names of the sub-tables are named using a fixed rule. Each of the above metrics contains only one measurement value, so converting it into a multi-value model is impossible. +Ideally you should take advantage of TDengine's multi-value modeling capabilities. To do so, you first need to meet the requirement that different collection quantities have the same collection frequency and can reach the **data write side simultaneously via a message queue**, thus ensuring that multiple metrics can be written at once using SQL statements. The metric's name is used as the name of the super table to create a multi-column model of data that has the same collection frequency and can arrive simultaneously. The sub-tables are named using a fixed rule. Each of the above metrics contains only one measurement value, so converting it into a multi-value model is impossible. ## Data triage and application adaptation -Subscribe data from the message queue and start the adapted writer to write the data. +Subscribe to the message queue and start writing data to TDengine. -After writing the data starts for a while, you can use SQL statements to check whether the amount of data written meets the expected writing requirements. Use the following SQL statement to count the amount of data. +After data has been written for a while, you can use SQL statements to check whether the amount of data written meets the expected writing requirements. Use the following SQL statement to count the amount of data. ```sql select count(*) from memory @@ -184,7 +185,7 @@ To facilitate historical data migration, we provide a plug-in for the data synchron For the specific usage of DataX and how to use DataX to write data to TDengine, please refer to [DataX-based TDengine Data Migration Tool](https://www.taosdata.com/blog/2021/10/26/3156.html). -After migrating via DataX, we found that we can significantly improve the efficiency of migrating historical data by starting multiple processes and migrating numerous metrics simultaneously. The following are some records of the migration process. I wish to use these for application migration as a reference. +After migrating via DataX, we found that we can significantly improve the efficiency of migrating historical data by starting multiple processes and migrating numerous metrics simultaneously. The following are some records of the migration process. We provide these as a reference for application migration. | Number of datax instances (number of concurrent processes) | Migration record speed (pieces/second) | | ----------------------------- | --------------------- | @@ -202,13 +203,13 @@ Suppose you need to use the multi-value model for data writing. In that case, yo Manual migration of data requires attention to the following two issues: -1) When storing the exported data on the disk, the disk needs to have enough storage space to accommodate the exported data files fully. Adopting the partial import mode to avoid the shortage of disk file storage after the total amount of data is exported. Preferentially export the timelines belonging to the same super table. Then the exported data files are imported into the TDengine system. +1) When storing the exported data on the disk, the disk needs to have enough storage space to accommodate the exported data files fully. To avoid running out of disk space, you can adopt a partial import mode in which you preferentially export the timelines belonging to the same super table and then only those files are imported into TDengine.
-2) Under the full load of the system, if there are enough remaining computing and IO resources, establish a multi-threaded importing to maximize the efficiency of data migration. Considering the vast load that data parsing brings to the CPU, it is necessary to control the maximum number of parallel tasks to avoid the overall overload of the system triggered by importing historical data. +2) Under the full load of the system, if there are enough remaining computing and IO resources, establish a multi-threaded import to maximize the efficiency of data migration. Considering the vast load that data parsing brings to the CPU, it is necessary to control the maximum number of parallel tasks to avoid overloading the system when importing historical data. Due to the ease of operation of TDengine itself, there is no need to perform index maintenance and data format change processing in the entire process. The whole process only needs to be executed sequentially. -When wholly importing the historical data into TDengine, the two systems run simultaneously and then switch the query request to TDengine to achieve seamless application switching. +While importing historical data into TDengine, the two systems should run simultaneously. Once all the data is migrated, switch the query request to TDengine to achieve seamless application switching. ## Appendix 1: OpenTSDB query function correspondence table @@ -222,12 +223,12 @@ Example: SELECT avg(val) FROM (SELECT first(val) FROM super_table WHERE ts >= startTime and ts <= endTime INTERVAL(20s) Fill(linear)) INTERVAL(20s) ``` -Remark: +Remarks: 1. The value in Interval needs to be the same as the interval value in the outer query. -2. The interpolation processing in TDengine needs to use subqueries to assist in the completion. As shown above, it is enough to specify the interpolation type in the inner query. Since the interpolation of the values in OpenTSDB uses linear interpolation, use fill( in the interpolation clause. linear) to declare the interpolation type. The following functions with the exact interpolation calculation requirements are processed by this method. -3. The parameter 20s in Interval indicates that the inner query will generate results according to a time window of 20 seconds. In an actual query, it needs to adjust to the time interval between different records. It ensures that producing interpolation results equivalent to the original data. -4. Due to the particular interpolation strategy and mechanism of OpenTSDB, the method of the first interpolation and then calculation in the aggregate query (Aggregate) makes the calculation results impossible to be utterly consistent with TDengine. But in the case of downsampling (Downsample), TDengine and OpenTSDB can obtain consistent results (since OpenTSDB performs aggregation and downsampling queries). +2. Interpolation processing in TDengine uses subqueries to assist in completion. As shown above, it is enough to specify the interpolation type in the inner query. Since OpenTSDB uses linear interpolation, use `fill(linear)` to declare the interpolation type in TDengine. Some of the functions mentioned below have exactly the same interpolation calculation requirements. +3. The parameter 20s in Interval indicates that the inner query will generate results according to a time window of 20 seconds. In an actual query, it needs to be adjusted to match the time interval between different records. This ensures that the interpolation results are equivalent to the original data. +4.
Due to the particular interpolation strategy and mechanism of OpenTSDB, i.e. interpolation followed by aggregate calculation, it is impossible for the results to be completely consistent with those of TDengine. But in the case of downsampling (Downsample), TDengine and OpenTSDB can obtain consistent results (since OpenTSDB performs aggregation and downsampling queries). ### Count @@ -261,7 +262,7 @@ Select apercentile(col1, 50, “t-digest”) from table_name Remark: -1. During the approximate query processing, OpenTSDB uses the t-digest algorithm by default, so in order to obtain the same calculation result, the algorithm used needs to be specified in the `apercentile()` function. TDengine can support two different approximation processing algorithms, declared by "default" and "t-digest" respectively. +1. When calculating approximate percentiles, OpenTSDB uses the t-digest algorithm by default. In order to obtain the same calculation results in TDengine, the algorithm used needs to be specified in the `apercentile()` function. TDengine supports two different percentile calculation algorithms, named "default" and "t-digest" respectively. ### First @@ -379,35 +380,34 @@ We still use the hypothetical environment from Chapter 4. There are three measur ### Storage resource estimation Assuming that the number of sensor devices that generate data and need to be stored is `n`, the frequency of data generation is `t` per second, and the length of each record is `L` bytes, the scale of data generated per day is `n * t * L` bytes. Assuming the compression ratio is `C`, the daily data size is `(n * t * L)/C` bytes. The storage resources are estimated to accommodate the data scale for 1.5 years. In the production environment, the compression ratio C of TDengine is generally between 5 and 7. -With additional 20% redundancy, you can calculate the required storage resources: +With additional 20% redundancy, you can calculate the required storage resources: ```matlab (n * t * L) * (365 * 1.5) * (1+20%)/C ``` - -Combined with the above calculation formula, bring the parameters into the formula, and the raw data scale generated every year is 11.8TB without considering the label information. Note that since tag information is associated with each timeline in TDengine, not every record. The scale of the amount of data to be recorded is somewhat reduced relative to the generated data, and this part of label data can be ignored as a whole. Assuming a compression ratio of 5, the size of the retained data ends up being 2.56 TB. +Substituting in the above formula, the raw data generated every year is 11.8TB without considering the label information. Note that tag information is associated with each timeline in TDengine, not every record. The amount of data to be recorded is somewhat reduced relative to the generated data, and label data can be ignored as a whole. Assuming a compression ratio of 5, the size of the retained data ends up being 2.56 TB. ### Storage Device Selection Considerations -The hard disk should be capable of better random read performance. Considering using an SSD as much as possible is a better choice. A disk with better random read performance is a great help to improve the system's query performance and improve the query response performance as a whole system. To obtain better query performance, the performance index of the single-threaded random read IOPS of the hard disk device should not be lower than 1000, and it is better to reach 5000 IOPS or more.
Recommend to use `fio` utility software to evaluate the running performance (please refer to Appendix 1 for specific usage) for the random IO read of the current device to confirm whether it can meet the requirements of random read of large files. +A disk with better random read performance, such as an SSD, greatly improves the query performance of the whole system. To obtain better query performance, the performance index of the single-threaded random read IOPS of the hard disk device should not be lower than 1000, and it is better to reach 5000 IOPS or more. We recommend using the `fio` utility to evaluate the random read performance of the current device (please refer to Appendix 1 for specific usage) to confirm whether it can meet the requirements of random reads of large files. Hard disk writing performance has little effect on TDengine. The TDengine writing process adopts the append write mode, so as long as it has good sequential write performance, both SAS hard disks and SSDs in the general sense can well meet TDengine's requirements for disk write performance. ### Computational resource estimates -Due to the particularity of IoT data, after the frequency of data generation is consistent, the writing process of TDengine maintains a relatively fixed amount of resource consumption (computing and storage). According to the [TDengine Operation and Maintenance Guide](/operation/) description, the system consumes less than 1 CPU core at 22,000 writes per second. +Due to the characteristics of IoT data, when the frequency of data generation is consistent, the writing process of TDengine maintains a relatively fixed amount of resource consumption (computing and storage). According to the [TDengine Operation and Maintenance Guide](/operation/) description, the system consumes less than 1 CPU core at 22,000 writes per second. -In estimating the CPU resources consumed by the query, assuming that the application requires the database to provide 10,000 QPS, the CPU time consumed by each query is about 1 ms. The query provided by each core per second is 1,000 QPS, which satisfies 10,000 QPS. The query request requires at least 10 cores. For the system as a whole system to have less than 50% CPU load, the entire cluster needs twice as many as 10 cores or 20 cores. +To estimate the CPU resources consumed by queries, assume that the application requires the database to provide 10,000 QPS and that the CPU time consumed by each query is about 1 ms. Each core then provides 1,000 QPS, so at least 10 cores are required to satisfy 10,000 QPS. For the system as a whole to have less than 50% CPU load, the entire cluster needs twice as many cores, i.e. 20 cores. ### Memory resource estimation -The database allocates 16MB\*3 buffer memory for each Vnode by default. If the cluster system includes 22 CPU cores, TDengine will create 22 Vnodes (virtual nodes) by default. Each Vnode contains 1000 tables, which can accommodate all the tables. Then it takes about 1.5 hours to write a block, which triggers the drop, and no adjustment is required. A total of 22 Vnodes require about 1GB of memory cache. Considering the memory needed for the query, assuming that the memory overhead of each query is about 50MB, the memory required for 500 queries concurrently is about 25GB. +The database allocates 16MB\*3 buffer memory for each Vnode by default.
If the cluster system includes 22 CPU cores, TDengine will create 22 Vnodes (virtual nodes) by default. Each Vnode contains 1000 tables, which is more than enough to accommodate all the tables in our hypothetical scenario. Then it takes about 1.5 hours to write a block, which triggers persistence to disk without requiring any adjustment. A total of 22 Vnodes require about 1GB of memory cache. Considering the memory needed for the query, assuming that the memory overhead of each query is about 50MB, the memory required for 500 queries concurrently is about 25GB. In summary, using a single 16-core 32GB machine or a cluster of 2 8-core 16GB machines is enough. ## Appendix 3: Cluster Deployment and Startup -TDengine provides a wealth of help documents to explain many aspects of cluster installation and deployment. Here is the list of corresponding document for your reference. +TDengine provides a wealth of help documents to explain many aspects of cluster installation and deployment. Here is the list of documents for your reference. ### Cluster Deployment @@ -421,7 +421,7 @@ To ensure that the system can obtain the necessary information for regular opera FQDN, firstEp, secondEP, dataDir, logDir, tmpDir, serverPort. For the specific meaning and setting requirements of each parameter, please refer to the document "[TDengine Cluster Installation and Management](/cluster/)" -Follow the same steps to set parameters on the nodes that need running, start the taosd service, and then add Dnodes to the cluster. +Follow the same steps to set parameters on the other nodes, start the taosd service, and then add Dnodes to the cluster. Finally, start `taos` and execute the `show dnodes` command. If you can see all the nodes that have joined the cluster, the cluster building process was successfully completed. For specific operation procedures and precautions, please refer to the document "[TDengine Cluster Installation and Management](/cluster/)". 
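+As a quick illustration of that final step, here is a minimal sketch of adding a node and verifying the cluster from the TDengine CLI (the host name `h2.taosdata.com` is only an assumed example; substitute the FQDN and serverPort you actually configured):
+
+```sql
+-- run inside the TDengine CLI `taos` on a node that is already part of the cluster
+CREATE DNODE "h2.taosdata.com:6030";  -- register the new dnode by its FQDN:port
+SHOW DNODES;                          -- every node that joined successfully should be listed as ready
+```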
diff --git a/docs-en/25-application/IT-DevOps-Solutions-Collectd-StatsD.webp b/docs-en/25-application/IT-DevOps-Solutions-Collectd-StatsD.webp new file mode 100644 index 0000000000000000000000000000000000000000..147a65b17bff2aa0e44faa206618bdce5664e1ca Binary files /dev/null and b/docs-en/25-application/IT-DevOps-Solutions-Collectd-StatsD.webp differ diff --git a/docs-en/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.webp b/docs-en/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.webp new file mode 100644 index 0000000000000000000000000000000000000000..3ca99c835b33df8845adf1b52d8fb8eb63076e82 Binary files /dev/null and b/docs-en/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.webp differ diff --git a/docs-en/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.webp b/docs-en/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.webp new file mode 100644 index 0000000000000000000000000000000000000000..04811f61b9b318e129552d87cd48eabf6e99feab Binary files /dev/null and b/docs-en/25-application/IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.webp differ diff --git a/docs-en/25-application/IT-DevOps-Solutions-Immigrate-TDengine-Arch.webp b/docs-en/25-application/IT-DevOps-Solutions-Immigrate-TDengine-Arch.webp new file mode 100644 index 0000000000000000000000000000000000000000..36930068758556f4de5b58321804a96401c64b22 Binary files /dev/null and b/docs-en/25-application/IT-DevOps-Solutions-Immigrate-TDengine-Arch.webp differ diff --git a/docs-en/25-application/IT-DevOps-Solutions-Telegraf.webp b/docs-en/25-application/IT-DevOps-Solutions-Telegraf.webp new file mode 100644 index 0000000000000000000000000000000000000000..fd5461ec9b37be66cac4c17fb1f81fec76158330 Binary files /dev/null and b/docs-en/25-application/IT-DevOps-Solutions-Telegraf.webp differ diff --git a/docs-en/25-application/IT-DevOps-Solutions-collectd-dashboard.webp b/docs-en/25-application/IT-DevOps-Solutions-collectd-dashboard.webp new file mode 100644 index 0000000000000000000000000000000000000000..879c27a1a5843c714ff3c33c1dccfa32a2154b82 Binary files /dev/null and b/docs-en/25-application/IT-DevOps-Solutions-collectd-dashboard.webp differ diff --git a/docs-en/25-application/IT-DevOps-Solutions-statsd-dashboard.webp b/docs-en/25-application/IT-DevOps-Solutions-statsd-dashboard.webp new file mode 100644 index 0000000000000000000000000000000000000000..1d4c655970b5f3fcb3be2d65d67eb42f08f35862 Binary files /dev/null and b/docs-en/25-application/IT-DevOps-Solutions-statsd-dashboard.webp differ diff --git a/docs-en/25-application/IT-DevOps-Solutions-telegraf-dashboard.webp b/docs-en/25-application/IT-DevOps-Solutions-telegraf-dashboard.webp new file mode 100644 index 0000000000000000000000000000000000000000..105afcdb8312b23675f62ff6339d5e737b5cd958 Binary files /dev/null and b/docs-en/25-application/IT-DevOps-Solutions-telegraf-dashboard.webp differ diff --git a/docs-en/27-train-faq/01-faq.md b/docs-en/27-train-faq/01-faq.md index 63ca954d117fb1493992a08990de05677befab97..e182e25b9e98bad11b9c90146400e3720605489e 100644 --- a/docs-en/27-train-faq/01-faq.md +++ b/docs-en/27-train-faq/01-faq.md @@ -5,38 +5,38 @@ title: Frequently Asked Questions ## Submit an Issue -If the tips in FAQ don't help much, please submit an issue on [GitHub](https://github.com/taosdata/TDengine) to describe your problem description, including TDengine version, hardware and OS information, the steps to reproduce the problem, etc. 
It would be very helpful if you package the contents in `/var/log/taos` and `/etc/taos` and upload. These two are the default directories used by TDengine, if they have been changed in your configuration, please use according to the actual configuration. It's recommended to firstly set `debugFlag` to 135 in `taos.cfg`, restart `taosd`, then reproduce the problem and collect logs. If you don't want to restart, an alternative way of setting `debugFlag` is executing `alter dnode debugFlag 135` command in TDengine CLI `taos`. During normal running, however, please make sure `debugFlag` is set to 131. +If the tips in FAQ don't help much, please submit an issue on [GitHub](https://github.com/taosdata/TDengine) to describe your problem. In your description please include the TDengine version, hardware and OS information, the steps to reproduce the problem and any other relevant information. It would be very helpful if you can package the contents in `/var/log/taos` and `/etc/taos` and upload them. These two are the default directories used by TDengine. If you have changed the default directories in your configuration, please package the files in your configured directories. We recommend setting `debugFlag` to 135 in `taos.cfg`, restarting `taosd`, then reproducing the problem and collecting the logs. If you don't want to restart, an alternative way of setting `debugFlag` is executing the `alter dnode debugFlag 135` command in the TDengine CLI `taos`. During normal running, however, please make sure `debugFlag` is set to 131. ## Frequently Asked Questions ### 1. How to upgrade to TDengine 2.0 from older version? -version 2.x is not compatible with version 1.x regarding configuration file and data file, please do following before upgrading: +Version 2.x is not compatible with version 1.x. With regard to the configuration and data files, please perform the following steps before upgrading. Please follow data integrity, security, backup and other relevant SOPs and best practices before removing/deleting any data. -1. Delete configuration files: `sudo rm -rf /etc/taos/taos.cfg` +1. Delete configuration files: `sudo rm -rf /etc/taos/taos.cfg` 2. Delete log files: `sudo rm -rf /var/log/taos/` 3. Delete data files if the data doesn't need to be kept: `sudo rm -rf /var/lib/taos/` -4. Install latests 2.x version -5. If the data needs to be kept and migrated to newer version, please contact professional service of TDengine for assistance +4. Install the latest 2.x version +5. If the data needs to be kept and migrated to a newer version, please contact professional services at TDengine for assistance. ### 2. How to handle "Unable to establish connection"? -When the client is unable to connect to the server, you can try following ways to find out why. +When the client is unable to connect to the server, you can try the following ways to troubleshoot and resolve the problem. 1. Check the network - - Check if the hosts where the client and server are running can be accessible to each other, for example by `ping` command. - - Check if the TCP/UDP on port 6030-6042 are open for access if firewall is enabled. It's better to firstly disable firewall for diagnostics. - - Check if the FQDN and serverPort are configured correctly in `taos.cfg` used by the server side - - Check if the `firstEp` is set properly in the `taos.cfg` used by the client side + - Check if the hosts where the client and server are running are accessible to each other, for example by `ping` command.
+ - Check if the TCP/UDP on port 6030-6042 are open for access if firewall is enabled. If possible, disable the firewall for diagnostics, but please ensure that you are following security and other relevant protocols. + - Check if the FQDN and serverPort are configured correctly in `taos.cfg` used by the server side. + - Check if the `firstEp` is set properly in the `taos.cfg` used by the client side. 2. Make sure the client version and server version are the same. 3. On server side, check the running status of `taosd` by executing `systemctl status taosd`. If your server is started using another way instead of `systemctl`, use the proper method to check whether the server process is running normally. -4. If using connector of Python, Java, Go, Rust, C#, node.JS on Linux to connect toe the server, please make sure `libtaos.so` is in directory `/usr/local/taos/driver` and `/usr/local/taos/driver` is in system lib search environment variable `LD_LIBRARY_PATH`. +4. If using a connector for Python, Java, Go, Rust, C#, or Node.js on Linux to connect to the server, please make sure `libtaos.so` is in directory `/usr/local/taos/driver` and `/usr/local/taos/driver` is in the system lib search environment variable `LD_LIBRARY_PATH`. -5. If using connector on Windows, please make sure `C:\TDengine\driver\taos.dll` is in your system lib search path, it's suggested to put `taos.dll` under `C:\Windows\System32`. +5. If using a connector on Windows, please make sure `C:\TDengine\driver\taos.dll` is in your system lib search path. We recommend putting `taos.dll` under `C:\Windows\System32`. 6. Some advanced network diagnostics tools @@ -45,7 +45,7 @@ When the client is unable to connect to the server, you can try following ways t Check whether a TCP port on server side is open: `nc -l {port}` Check whether a TCP port on client side is open: `nc {hostIP} {port}` - - On Windows system `Net-TestConnection -ComputerName {fqdn} -Port {port}` on PowerShell can be used to check whether the port on serer side is open for access. + - On Windows systems, the PowerShell command `Test-NetConnection -ComputerName {fqdn} -Port {port}` can be used to check whether the port on the server side is open for access. 7. TDengine CLI `taos` can also be used to check network, please refer to [TDengine CLI](/reference/taos-shell). @@ -80,7 +80,7 @@ From version 2.1.7.0, at most 4096 columns can be defined for a table. Inserting data in batch is a good practice. A single SQL statement can insert data for one or multiple tables in batch; see the sketch at the end of this section. -### 9. JDBC Error: the excuted SQL is not a DML or a DDL? +### 9. JDBC Error: the executed SQL is not a DML or a DDL? Please upgrade to the latest JDBC driver, for details please refer to [Java Connector](/reference/connector/java) @@ -104,7 +104,7 @@ ALTER LOCAL flag_name flag_value; -### 13. Hhat to do if go compilation fails? +### 13. What to do if go compilation fails? From version 2.3.0.0, a new component named `taosAdapter` is introduced. It's developed in Go. If you want to compile from source code and meet go compilation problems, try the steps below to resolve Go environment problems.
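+As referenced in the batch insert tip above, a single SQL statement can create sub-tables on the fly and write rows to several of them at once. A minimal sketch, reusing the `meters` super table and sample values from the documentation examples:
+
+```sql
+INSERT INTO d1001 USING meters TAGS ("California.SanFrancisco", 2)
+       VALUES ('2018-10-03 14:38:05.000', 10.3, 219, 0.31)
+   d1003 USING meters TAGS ("California.LosAngeles", 2)
+       VALUES ('2018-10-03 14:38:05.500', 11.8, 221, 0.28);
+```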
diff --git a/docs-en/27-train-faq/03-docker.md b/docs-en/27-train-faq/03-docker.md index 0bcc39f903c635aed7fe8c850d8b706f6ba92293..afee13c1377b0b4331d6f7ec20251d1aa2db81a1 100644 --- a/docs-en/27-train-faq/03-docker.md +++ b/docs-en/27-train-faq/03-docker.md @@ -3,15 +3,15 @@ sidebar_label: TDengine in Docker title: Deploy TDengine in Docker --- -Even though it's not recommended to deploy TDengine using docker in production system, docker is still very useful in development environment, especially when your host is not Linux. From version 2.0.14.0, the official image of TDengine can support X86-64, X86, arm64, and rm32 . +We do not recommend deploying TDengine using Docker in a production system. However, Docker is still very useful in a development environment, especially when your host is not Linux. From version 2.0.14.0, the official image of TDengine supports X86-64, X86, arm64, and arm32. -In this chapter a simple step by step guide of using TDengine in docker is introduced. +In this chapter we introduce a simple step-by-step guide to using TDengine in Docker. ## Install Docker -The installation of docker please refer to [Get Docker](https://docs.docker.com/get-docker/). +To install Docker please refer to [Get Docker](https://docs.docker.com/get-docker/). -After docker is installed, you can check whether Docker is installed properly by displaying Docker version. +After Docker is installed, you can check whether Docker is installed properly by displaying the Docker version. ```bash $ docker -v @@ -27,7 +27,7 @@ $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdeng 526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd ``` -In the above command, a docker container is started to run TDengine server, the port range 6030-6049 of the container is mapped to host port range 6030-6049. If port range 6030-6049 has been occupied on the host, please change to an available host port range. Regarding the requirements about ports on the host, please refer to [Port Configuration](/reference/config/#serverport). +In the above command, a docker container is started to run TDengine server, and the port range 6030-6049 of the container is mapped to host port range 6030-6049. If port range 6030-6049 has been occupied on the host, please change to an available host port range. For port requirements on the host, please refer to [Port Configuration](/reference/config/#serverport). - **docker run**: Launch a docker container - **-d**: the container will run in background mode @@ -72,7 +72,7 @@ $ docker exec -it tdengine /bin/bash root@tdengine-server:~/TDengine-server-2.4.0.4# ``` -- **docker exec**: Attach to the continaer +- **docker exec**: Attach to the container - **-i**: Interactive mode - **-t**: Use terminal - **tdengine**: Container name, up to the output of `docker ps` @@ -95,7 +95,7 @@ In TDengine CLI, SQL commands can be executed to create/drop databases, tables, ### Access TDengine from host -If `-p` used to map ports properly between host and container, it's also able to access TDengine in container from the host as long as `firstEp` is configured correctly for the client on host. +If option `-p` is used to map ports properly between host and container, it's also possible to access TDengine in the container from the host, as long as `firstEp` is configured correctly for the client on the host.
``` $ taos @@ -118,7 +118,7 @@ Output is like below: {"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep0,keep1,keep(D)",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["test","2021-08-18 06:01:11.021",10000,4,1,1,10,"3650,3650,3650",16,6,100,4096,1,3000,2,0,"ms",0,"ready"],["log","2021-08-18 05:51:51.065",4,1,1,1,10,"30,30,30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":2} ``` -For details of REST API please refer to [REST API]](/reference/rest-api/). +For details of REST API please refer to [REST API](/reference/rest-api/). ### Run TDengine server and taosAdapter inside container @@ -156,7 +156,7 @@ Below is an example output: {"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["log","2021-12-28 09:18:55.765",10,1,1,1,10,"30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":1} ``` -### Use taosBenchmark on host to access TDenginer server in container +### Use taosBenchmark on host to access TDengine server in container 1. Run `taosBenchmark`, named as `taosdemo` previously, on the host: @@ -265,13 +265,13 @@ Below is an example output: $ taos> select groupid, location from test.d0; groupid | location | ================================= - 0 | shanghai | + 0 | California.SanDiego | Query OK, 1 row(s) in set (0.003490s) ``` ### Access TDengine from 3rd party tools -A lot of 3rd party tools can be used to write data into TDengine through `taosAdapter` , for details please refer to [3rd party tools](/third-party/). +A lot of 3rd party tools can be used to write data into TDengine through `taosAdapter`, for details please refer to [3rd party tools](/third-party/). There is nothing different from the 3rd party side to access TDengine server inside a container, as long as the end point is specified correctly, the end point should be the FQDN and the mapped port of the host. diff --git a/docs-examples/c/async_query_example.c b/docs-examples/c/async_query_example.c index 77002891bb4c03f7c7e32b329678e8a124f12a99..b370420b124a21b05f8e0b4041fb1461b1e2478a 100644 --- a/docs-examples/c/async_query_example.c +++ b/docs-examples/c/async_query_example.c @@ -155,7 +155,7 @@ void *select_callback(void *param, TAOS_RES *res, int code) { printHeader(res); taos_fetch_rows_a(res, fetch_row_callback, _taos); } else { - printf("failed to exeuce taos_query. error: %s\n", taos_errstr(res)); + printf("failed to execute taos_query. error: %s\n", taos_errstr(res)); taos_free_result(res); taos_close(_taos); taos_cleanup(); @@ -182,14 +182,14 @@ int main() { // query callback ... 
// ts current voltage phase location groupid // numOfRow = 8 -// 1538548685000 10.300000 219 0.310000 beijing.chaoyang 2 -// 1538548695000 12.600000 218 0.330000 beijing.chaoyang 2 -// 1538548696800 12.300000 221 0.310000 beijing.chaoyang 2 -// 1538548696650 10.300000 218 0.250000 beijing.chaoyang 3 -// 1538548685500 11.800000 221 0.280000 beijing.haidian 2 -// 1538548696600 13.400000 223 0.290000 beijing.haidian 2 -// 1538548685000 10.800000 223 0.290000 beijing.haidian 3 -// 1538548686500 11.500000 221 0.350000 beijing.haidian 3 +// 1538548685500 11.800000 221 0.280000 california.losangeles 2 +// 1538548696600 13.400000 223 0.290000 california.losangeles 2 +// 1538548685000 10.800000 223 0.290000 california.losangeles 3 +// 1538548686500 11.500000 221 0.350000 california.losangeles 3 +// 1538548685000 10.300000 219 0.310000 california.sanfrancisco 2 +// 1538548695000 12.600000 218 0.330000 california.sanfrancisco 2 +// 1538548696800 12.300000 221 0.310000 california.sanfrancisco 2 +// 1538548696650 10.300000 218 0.250000 california.sanfrancisco 3 // numOfRow = 0 // no more data, close the connection. // ANCHOR_END: demo \ No newline at end of file diff --git a/docs-examples/c/connect_example.c b/docs-examples/c/connect_example.c index ff0891e08267840fd5141d1b4271109d832c1c51..1a23df4806d7ff986898734e1971f6e0cd7c5360 100644 --- a/docs-examples/c/connect_example.c +++ b/docs-examples/c/connect_example.c @@ -13,9 +13,9 @@ int main() { uint16_t port = 0; // 0 means use the default port TAOS *taos = taos_connect(host, user, passwd, db, port); if (taos == NULL) { - int errono = taos_errno(NULL); + int errno = taos_errno(NULL); char *msg = taos_errstr(NULL); - printf("%d, %s\n", errono, msg); + printf("%d, %s\n", errno, msg); } else { printf("connected\n"); taos_close(taos); diff --git a/docs-examples/c/error_handle_example.c b/docs-examples/c/error_handle_example.c index 36bb7f12f77a46230add5af82b68e6fb86ddfe77..e7dedb263df250f6634aa15fab2729cbaf4e5972 100644 --- a/docs-examples/c/error_handle_example.c +++ b/docs-examples/c/error_handle_example.c @@ -13,9 +13,9 @@ int main() { uint16_t port = 0; // 0 means use the default port TAOS *taos = taos_connect(host, user, passwd, db, port); if (taos == NULL) { - int errono = taos_errno(NULL); + int errno = taos_errno(NULL); char *msg = taos_errstr(NULL); - printf("%d, %s\n", errono, msg); + printf("%d, %s\n", errno, msg); } else { printf("connected\n"); taos_close(taos); diff --git a/docs-examples/c/insert_example.c b/docs-examples/c/insert_example.c index ca12be9314efbda707dbd05449c746794c209743..ce8fdc5b9372aec7b02d3c9254ec25c4c4f62adc 100644 --- a/docs-examples/c/insert_example.c +++ b/docs-examples/c/insert_example.c @@ -36,10 +36,10 @@ int main() { executeSQL(taos, "CREATE DATABASE power"); executeSQL(taos, "USE power"); executeSQL(taos, "CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)"); - executeSQL(taos, "INSERT INTO d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)" - "d1002 USING meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)" - "d1003 USING meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)" - "d1004 USING meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 
223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)"); + executeSQL(taos, "INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)" + "d1002 USING meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)" + "d1003 USING meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)" + "d1004 USING meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)"); taos_close(taos); taos_cleanup(); } diff --git a/docs-examples/c/json_protocol_example.c b/docs-examples/c/json_protocol_example.c index 182fd201308facc80c76f36cfa57580784d70413..9d276127a64c3d74322e30587ab2e319c29cbf65 100644 --- a/docs-examples/c/json_protocol_example.c +++ b/docs-examples/c/json_protocol_example.c @@ -29,11 +29,11 @@ int main() { executeSQL(taos, "USE test"); char *line = "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": " - "\"Beijing.Chaoyang\", \"groupid\": 2}},{\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, " - "\"value\": 219, \"tags\": {\"location\": \"Beijing.Haidian\", \"groupid\": 1}},{\"metric\": \"meters.current\", " - "\"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"Beijing.Chaoyang\", \"groupid\": " + "\"California.SanFrancisco\", \"groupid\": 2}},{\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, " + "\"value\": 219, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}},{\"metric\": \"meters.current\", " + "\"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": " "2}},{\"metric\": \"meters.voltage\", \"timestamp\": 1648432611250, \"value\": 221, \"tags\": {\"location\": " - "\"Beijing.Haidian\", \"groupid\": 1}}]"; + "\"California.LosAngeles\", \"groupid\": 1}}]"; char *lines[] = {line}; TAOS_RES *res = taos_schemaless_insert(taos, lines, 1, TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_NOT_CONFIGURED); diff --git a/docs-examples/c/line_example.c b/docs-examples/c/line_example.c index 8dd4b1a5075369625645959da0476b76b9fbf290..ce39f8d9df744082a450ce246529bf56adebd1e0 100644 --- a/docs-examples/c/line_example.c +++ b/docs-examples/c/line_example.c @@ -27,10 +27,10 @@ int main() { executeSQL(taos, "DROP DATABASE IF EXISTS test"); executeSQL(taos, "CREATE DATABASE test"); executeSQL(taos, "USE test"); - char *lines[] = {"meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249", - "meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250", - "meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249", - "meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250"}; + char *lines[] = {"meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249", + "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250", + "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249", + "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250"}; TAOS_RES 
*res = taos_schemaless_insert(taos, lines, 4, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS); if (taos_errno(res) != 0) { printf("failed to insert schema-less data, reason: %s\n", taos_errstr(res)); diff --git a/docs-examples/c/multi_bind_example.c b/docs-examples/c/multi_bind_example.c index fe11df9caad3e216fbd0b1ff2f40a54fe3ba86e5..02e6568e9e88ac8703a4993ed406e770d23c2438 100644 --- a/docs-examples/c/multi_bind_example.c +++ b/docs-examples/c/multi_bind_example.c @@ -52,7 +52,7 @@ void insertData(TAOS *taos) { checkErrorCode(stmt, code, "failed to execute taos_stmt_prepare"); // bind table name and tags TAOS_BIND tags[2]; - char *location = "Beijing.Chaoyang"; + char *location = "California.SanFrancisco"; int groupId = 2; tags[0].buffer_type = TSDB_DATA_TYPE_BINARY; tags[0].buffer_length = strlen(location); diff --git a/docs-examples/c/query_example.c b/docs-examples/c/query_example.c index 4314ac4fe2f5b5251af2462bf0b20ebeed7cac5e..fcae95bcd45a282eaa3ae911b4115e6300c6af8e 100644 --- a/docs-examples/c/query_example.c +++ b/docs-examples/c/query_example.c @@ -128,7 +128,7 @@ int main() { } TAOS_RES *res = taos_query(taos, "SELECT * FROM meters LIMIT 2"); if (taos_errno(res) != 0) { - printf("failed to exeuce taos_query. error: %s\n", taos_errstr(res)); + printf("failed to execute taos_query. error: %s\n", taos_errstr(res)); exit(EXIT_FAILURE); } printResult(res); @@ -139,5 +139,5 @@ int main() { // output: // ts current voltage phase location groupid -// 1648432611249 10.300000 219 0.310000 Beijing.Chaoyang 2 -// 1648432611749 12.600000 218 0.330000 Beijing.Chaoyang 2 \ No newline at end of file +// 1648432611249 10.300000 219 0.310000 California.SanFrancisco 2 +// 1648432611749 12.600000 218 0.330000 California.SanFrancisco 2 \ No newline at end of file diff --git a/docs-examples/c/stmt_example.c b/docs-examples/c/stmt_example.c index fab1506f953ef68050e4318406fa2ba1a0202929..28dae5f9d5ea2faec0aa3c0a784d39e252651c65 100644 --- a/docs-examples/c/stmt_example.c +++ b/docs-examples/c/stmt_example.c @@ -59,7 +59,7 @@ void insertData(TAOS *taos) { checkErrorCode(stmt, code, "failed to execute taos_stmt_prepare"); // bind table name and tags TAOS_BIND tags[2]; - char* location = "Beijing.Chaoyang"; + char* location = "California.SanFrancisco"; int groupId = 2; tags[0].buffer_type = TSDB_DATA_TYPE_BINARY; tags[0].buffer_length = strlen(location); diff --git a/docs-examples/c/subscribe_demo.c b/docs-examples/c/subscribe_demo.c index b523b4667e08ae8a02f4a470c939091f216d1dcb..2fe62c24eb92d2f57c24b40fc16f47d62ea5e378 100644 --- a/docs-examples/c/subscribe_demo.c +++ b/docs-examples/c/subscribe_demo.c @@ -46,7 +46,7 @@ int main() { exit(EXIT_FAILURE); } - int restart = 1; // if the topic already exists, where to subscribe from the begine. + int restart = 1; // if the topic already exists, whether to subscribe from the beginning. const char* topic = "topic-meter-current-bg-10"; const char* sql = "select * from power.meters where current > 10"; void* param = NULL; // additional parameter.
@@ -58,7 +58,7 @@ int main() { getchar(); // press Enter to stop printf("total rows consumed: %d\n", nTotalRows); - int keep = 0; // weather to keep subscribe process + int keep = 0; // whether to keep subscribe process taos_unsubscribe(tsub, keep); taos_close(taos); diff --git a/docs-examples/c/telnet_line_example.c b/docs-examples/c/telnet_line_example.c index 913d433f6aec07b3bce115d45536ffa4b45a0481..da62da4ba492856b0d73a564c1bf9cdd60b5b742 100644 --- a/docs-examples/c/telnet_line_example.c +++ b/docs-examples/c/telnet_line_example.c @@ -28,14 +28,14 @@ int main() { executeSQL(taos, "CREATE DATABASE test"); executeSQL(taos, "USE test"); char *lines[] = { - "meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3", - "meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3", + "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2", + "meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2", + "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3", + "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3", }; TAOS_RES *res = taos_schemaless_insert(taos, lines, 8, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NOT_CONFIGURED); if (taos_errno(res) != 0) { diff --git a/docs-examples/csharp/AsyncQueryExample.cs b/docs-examples/csharp/AsyncQueryExample.cs index fe30d21efe82e8d1dc414bd4723227ca93bc944f..3dabbebd1630a207af2e1b1b11cc4ba15bdd94a9 100644 --- a/docs-examples/csharp/AsyncQueryExample.cs +++ b/docs-examples/csharp/AsyncQueryExample.cs @@ -224,15 +224,15 @@ namespace TDengineExample } //output: -//Connect to TDengine success -//8 rows async retrieved - -//1538548685000 | 10.3 | 219 | 0.31 | beijing.chaoyang | 2 | -//1538548695000 | 12.6 | 218 | 0.33 | beijing.chaoyang | 2 | -//1538548696800 | 12.3 | 221 | 0.31 | beijing.chaoyang | 2 | -//1538548696650 | 10.3 | 218 | 0.25 | beijing.chaoyang | 3 | -//1538548685500 | 11.8 | 221 | 0.28 | beijing.haidian | 2 | -//1538548696600 | 13.4 | 223 | 0.29 | beijing.haidian | 2 | -//1538548685000 | 10.8 | 223 | 0.29 | beijing.haidian | 3 | -//1538548686500 | 11.5 | 221 | 0.35 | beijing.haidian | 3 | -//async retrieve complete. 
\ No newline at end of file +// Connect to TDengine success +// 8 rows async retrieved + +// 1538548685500 | 11.8 | 221 | 0.28 | california.losangeles | 2 | +// 1538548696600 | 13.4 | 223 | 0.29 | california.losangeles | 2 | +// 1538548685000 | 10.8 | 223 | 0.29 | california.losangeles | 3 | +// 1538548686500 | 11.5 | 221 | 0.35 | california.losangeles | 3 | +// 1538548685000 | 10.3 | 219 | 0.31 | california.sanfrancisco | 2 | +// 1538548695000 | 12.6 | 218 | 0.33 | california.sanfrancisco | 2 | +// 1538548696800 | 12.3 | 221 | 0.31 | california.sanfrancisco | 2 | +// 1538548696650 | 10.3 | 218 | 0.25 | california.sanfrancisco | 3 | +// async retrieve complete. \ No newline at end of file diff --git a/docs-examples/csharp/InfluxDBLineExample.cs b/docs-examples/csharp/InfluxDBLineExample.cs index 7aad08825209db568d61e5963ec7a00034ab7ca7..7b4453f4ac0b14dd76d166e395bdacb46a5d3fbc 100644 --- a/docs-examples/csharp/InfluxDBLineExample.cs +++ b/docs-examples/csharp/InfluxDBLineExample.cs @@ -9,10 +9,10 @@ namespace TDengineExample IntPtr conn = GetConnection(); PrepareDatabase(conn); string[] lines = { - "meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249", - "meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250", - "meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249", - "meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250" + "meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249", + "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250", + "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249", + "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250" }; IntPtr res = TDengine.SchemalessInsert(conn, lines, lines.Length, (int)TDengineSchemalessProtocol.TSDB_SML_LINE_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_MILLI_SECONDS); if (TDengine.ErrorNo(res) != 0) diff --git a/docs-examples/csharp/OptsJsonExample.cs b/docs-examples/csharp/OptsJsonExample.cs index d774a325afa1a8d93eb858f23dcd97dd29f8653d..2c41acc5c9628befda7eb4ad5c30af5b921de948 100644 --- a/docs-examples/csharp/OptsJsonExample.cs +++ b/docs-examples/csharp/OptsJsonExample.cs @@ -8,10 +8,10 @@ namespace TDengineExample { IntPtr conn = GetConnection(); PrepareDatabase(conn); - string[] lines = { "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": \"Beijing.Chaoyang\", \"groupid\": 2}}," + - " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, \"value\": 219, \"tags\": {\"location\": \"Beijing.Haidian\", \"groupid\": 1}}, " + - "{\"metric\": \"meters.current\", \"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"Beijing.Chaoyang\", \"groupid\": 2}}," + - " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611250, \"value\": 221, \"tags\": {\"location\": \"Beijing.Haidian\", \"groupid\": 1}}]" + string[] lines = { "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," + + " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, \"value\": 219, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}, " + + "{\"metric\": \"meters.current\", \"timestamp\": 1648432611250, \"value\": 
12.6, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," + + " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611250, \"value\": 221, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}]" }; IntPtr res = TDengine.SchemalessInsert(conn, lines, 1, (int)TDengineSchemalessProtocol.TSDB_SML_JSON_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NOT_CONFIGURED); diff --git a/docs-examples/csharp/OptsTelnetExample.cs b/docs-examples/csharp/OptsTelnetExample.cs index 81608c32213fa0618a2ca6e0769aacf8e9c8e64d..bb752db1afbbb2ef68df9ca25314c8b91cd9a266 100644 --- a/docs-examples/csharp/OptsTelnetExample.cs +++ b/docs-examples/csharp/OptsTelnetExample.cs @@ -9,14 +9,14 @@ namespace TDengineExample IntPtr conn = GetConnection(); PrepareDatabase(conn); string[] lines = { - "meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3", - "meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3", + "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2", + "meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2", + "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3", + "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3", }; IntPtr res = TDengine.SchemalessInsert(conn, lines, lines.Length, (int)TDengineSchemalessProtocol.TSDB_SML_TELNET_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NOT_CONFIGURED); if (TDengine.ErrorNo(res) != 0) diff --git a/docs-examples/csharp/QueryExample.cs b/docs-examples/csharp/QueryExample.cs index f00e391100c7ce42177e2987f5b0b32dc02262c4..97f0c456d412e2ed608c345ba87469d3f5ccfc15 100644 --- a/docs-examples/csharp/QueryExample.cs +++ b/docs-examples/csharp/QueryExample.cs @@ -158,5 +158,5 @@ namespace TDengineExample // Connect to TDengine success // fieldCount=6 // ts current voltage phase location groupid -// 1648432611249 10.3 219 0.31 Beijing.Chaoyang 2 -// 1648432611749 12.6 218 0.33 Beijing.Chaoyang 2 \ No newline at end of file +// 1648432611249 10.3 219 0.31 California.SanFrancisco 2 +// 1648432611749 12.6 218 0.33 California.SanFrancisco 2 \ No newline at end of file diff --git a/docs-examples/csharp/SQLInsertExample.cs b/docs-examples/csharp/SQLInsertExample.cs index fa2e2a50daf06f4d948479e7f5b0df82c517f809..d5462c1062e01fd5c93bac983696d0350117ad92 100644 --- a/docs-examples/csharp/SQLInsertExample.cs +++ b/docs-examples/csharp/SQLInsertExample.cs @@ -15,10 +15,10 @@ namespace TDengineExample CheckRes(conn, res, "failed to change database"); res = TDengine.Query(conn, "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)"); CheckRes(conn, res, "failed to create 
stable"); - var sql = "INSERT INTO d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) " + - "d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) " + - "d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000)('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) " + - "d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000)('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)"; + var sql = "INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) " + + "d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) " + + "d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000)('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) " + + "d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000)('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)"; res = TDengine.Query(conn, sql); CheckRes(conn, res, "failed to insert data"); int affectedRows = TDengine.AffectRows(res); diff --git a/docs-examples/csharp/StmtInsertExample.cs b/docs-examples/csharp/StmtInsertExample.cs index d6e00dd4ac54ab8dbfc33b93896d19fc585e7642..6ade424b95d64529b7a40a782de13e3106d0c78a 100644 --- a/docs-examples/csharp/StmtInsertExample.cs +++ b/docs-examples/csharp/StmtInsertExample.cs @@ -21,7 +21,7 @@ namespace TDengineExample CheckStmtRes(res, "failed to prepare stmt"); // 2. bind table name and tags - TAOS_BIND[] tags = new TAOS_BIND[2] { TaosBind.BindBinary("Beijing.Chaoyang"), TaosBind.BindInt(2) }; + TAOS_BIND[] tags = new TAOS_BIND[2] { TaosBind.BindBinary("California.SanFrancisco"), TaosBind.BindInt(2) }; res = TDengine.StmtSetTbnameTags(stmt, "d1001", tags); CheckStmtRes(res, "failed to bind table name and tags"); diff --git a/docs-examples/go/connect/cgoexample/main.go b/docs-examples/go/connect/cgoexample/main.go index 8b9aba4ce4217c00605bc8796c788f3dd52805e6..ba7ed0f728a1cd546dbc3199ce4c0dc854ebee91 100644 --- a/docs-examples/go/connect/cgoexample/main.go +++ b/docs-examples/go/connect/cgoexample/main.go @@ -20,4 +20,4 @@ func main() { // use // var taosDSN = "root:taosdata@tcp(localhost:6030)/dbName" -// if you want to connect to a default database. +// if you want to connect a specified database named "dbName". diff --git a/docs-examples/go/connect/restexample/main.go b/docs-examples/go/connect/restexample/main.go index 9c05e7eed80dee4ae7e6b20637d265f388d7438d..1efc98b988c183c4c680884057bf2a72a9dd19e9 100644 --- a/docs-examples/go/connect/restexample/main.go +++ b/docs-examples/go/connect/restexample/main.go @@ -18,6 +18,6 @@ func main() { defer taos.Close() } -// use +// use // var taosDSN = "root:taosdata@http(localhost:6041)/dbName" -// if you want to connect to a default database. +// if you want to connect a specified database named "dbName". 
diff --git a/docs-examples/go/insert/json/main.go b/docs-examples/go/insert/json/main.go index 47d9e9984adc05896fb9954ad3deffde3764b836..6be375270e32a5091c015f88de52c9dda2246b59 100644 --- a/docs-examples/go/insert/json/main.go +++ b/docs-examples/go/insert/json/main.go @@ -25,10 +25,10 @@ func main() { defer conn.Close() prepareDatabase(conn) - payload := `[{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "Beijing.Chaoyang", "groupid": 2}}, - {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, "tags": {"location": "Beijing.Haidian", "groupid": 1}}, - {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, "tags": {"location": "Beijing.Chaoyang", "groupid": 2}}, - {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "Beijing.Haidian", "groupid": 1}}]` + payload := `[{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, + {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, "tags": {"location": "California.LosAngeles", "groupid": 1}}, + {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, + {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "California.LosAngeles", "groupid": 1}}]` err = conn.OpenTSDBInsertJsonPayload(payload) if err != nil { diff --git a/docs-examples/go/insert/line/main.go b/docs-examples/go/insert/line/main.go index bbc41468fe5f13d3e6f896445bb88f3eba584d0f..c17e1a5270850e6a8b497e0dbec4ae714ee1e2d6 100644 --- a/docs-examples/go/insert/line/main.go +++ b/docs-examples/go/insert/line/main.go @@ -25,10 +25,10 @@ func main() { defer conn.Close() prepareDatabase(conn) var lines = []string{ - "meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249", - "meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250", - "meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249", - "meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250", + "meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249", + "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250", + "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249", + "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250", } err = conn.InfluxDBInsertLines(lines, "ms") diff --git a/docs-examples/go/insert/sql/main.go b/docs-examples/go/insert/sql/main.go index 91386855334c1930af721e0b4f43395c6a6d8e82..6cd5f860e65f4fffd139668f69cc1772f5310eae 100644 --- a/docs-examples/go/insert/sql/main.go +++ b/docs-examples/go/insert/sql/main.go @@ -19,10 +19,10 @@ func createStable(taos *sql.DB) { } func insertData(taos *sql.DB) { - sql := `INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) - power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) - power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) 
('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) - power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)` + sql := `INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) + power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) + power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) + power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)` result, err := taos.Exec(sql) if err != nil { fmt.Println("failed to insert, err:", err) diff --git a/docs-examples/go/insert/stmt/main.go b/docs-examples/go/insert/stmt/main.go index c50200ebb427c4c64c2737cb8fe4c3d287551a34..7093fdf1e52bc5a14fc92cec995fd81e70717d9f 100644 --- a/docs-examples/go/insert/stmt/main.go +++ b/docs-examples/go/insert/stmt/main.go @@ -37,7 +37,7 @@ func main() { checkErr(err, "failed to create prepare statement") // bind table name and tags - tagParams := param.NewParam(2).AddBinary([]byte("Beijing.Chaoyang")).AddInt(2) + tagParams := param.NewParam(2).AddBinary([]byte("California.SanFrancisco")).AddInt(2) err = stmt.SetTableNameWithTags("d1001", tagParams) checkErr(err, "failed to execute SetTableNameWithTags") diff --git a/docs-examples/go/insert/telnet/main.go b/docs-examples/go/insert/telnet/main.go index 879e6d5cece74fd0b7c815dd34614dca3c9d4544..91fafbe71adbf60d9341b903f5a25708b7011852 100644 --- a/docs-examples/go/insert/telnet/main.go +++ b/docs-examples/go/insert/telnet/main.go @@ -25,14 +25,14 @@ func main() { defer conn.Close() prepareDatabase(conn) var lines = []string{ - "meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3", - "meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3", + "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2", + "meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2", + "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3", + "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3", } err = conn.OpenTSDBInsertTelnetLines(lines) diff --git a/docs-examples/java/src/main/java/com/taos/example/JNIConnectExample.java b/docs-examples/java/src/main/java/com/taos/example/JNIConnectExample.java index 
c6ce2ef9785a010daa55ad29415f81711760cd57..84292f7e8682dbb8171c807da74a603f4ae8256e 100644 --- a/docs-examples/java/src/main/java/com/taos/example/JNIConnectExample.java +++ b/docs-examples/java/src/main/java/com/taos/example/JNIConnectExample.java @@ -22,4 +22,4 @@ public class JNIConnectExample { // use // String jdbcUrl = "jdbc:TAOS://localhost:6030/dbName?user=root&password=taosdata"; -// if you want to connect to a default database. \ No newline at end of file +// if you want to connect to a specified database named "dbName". \ No newline at end of file diff --git a/docs-examples/java/src/main/java/com/taos/example/JSONProtocolExample.java b/docs-examples/java/src/main/java/com/taos/example/JSONProtocolExample.java index cb83424576a4fd7dfa09ea297294ed77b66bd12d..c8e649482fbd747cdc238daa9e7a237cf63295b6 100644 --- a/docs-examples/java/src/main/java/com/taos/example/JSONProtocolExample.java +++ b/docs-examples/java/src/main/java/com/taos/example/JSONProtocolExample.java @@ -23,10 +23,10 @@ public class JSONProtocolExample { } private static String getJSONData() { - return "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": \"Beijing.Chaoyang\", \"groupid\": 2}}," + - " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, \"value\": 219, \"tags\": {\"location\": \"Beijing.Haidian\", \"groupid\": 1}}, " + - "{\"metric\": \"meters.current\", \"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"Beijing.Chaoyang\", \"groupid\": 2}}," + - " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611250, \"value\": 221, \"tags\": {\"location\": \"Beijing.Haidian\", \"groupid\": 1}}]"; + return "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," + + " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, \"value\": 219, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}, " + + "{\"metric\": \"meters.current\", \"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," + + " {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611250, \"value\": 221, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}]"; } public static void main(String[] args) throws SQLException { diff --git a/docs-examples/java/src/main/java/com/taos/example/LineProtocolExample.java b/docs-examples/java/src/main/java/com/taos/example/LineProtocolExample.java index 8a2eabe0a91f7966cc3cc6b7dfeeb71b71b88d92..990922b7a516bd32a7e299f5743bd1b5e321868a 100644 --- a/docs-examples/java/src/main/java/com/taos/example/LineProtocolExample.java +++ b/docs-examples/java/src/main/java/com/taos/example/LineProtocolExample.java @@ -12,11 +12,11 @@ import java.sql.Statement; public class LineProtocolExample { // format: measurement,tag_set field_set timestamp private static String[] lines = { - "meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000", // micro + "meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000", // micro // seconds - "meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500", - "meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249300", - "meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611249800", +
"meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500", + "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249300", + "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611249800", }; private static Connection getConnection() throws SQLException { diff --git a/docs-examples/java/src/main/java/com/taos/example/RestInsertExample.java b/docs-examples/java/src/main/java/com/taos/example/RestInsertExample.java index de89f26cbe38f9343d60aeb8d3e9ce7f67c2e764..af97fe4373ca964260e5614f133f359e229b0e15 100644 --- a/docs-examples/java/src/main/java/com/taos/example/RestInsertExample.java +++ b/docs-examples/java/src/main/java/com/taos/example/RestInsertExample.java @@ -16,28 +16,28 @@ public class RestInsertExample { private static List getRawData() { return Arrays.asList( - "d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,Beijing.Chaoyang,2", - "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,Beijing.Chaoyang,2", - "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,Beijing.Chaoyang,2", - "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,Beijing.Chaoyang,3", - "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,Beijing.Haidian,2", - "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,Beijing.Haidian,2", - "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,Beijing.Haidian,3", - "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,Beijing.Haidian,3" + "d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,California.SanFrancisco,2", + "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,California.SanFrancisco,2", + "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,California.SanFrancisco,2", + "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,California.SanFrancisco,3", + "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,California.LosAngeles,2", + "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,California.LosAngeles,2", + "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,California.LosAngeles,3", + "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,California.LosAngeles,3" ); } /** * The generated SQL is: - * INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) - * power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) - * power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) - * power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) - * power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) - * power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) - * power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) - * power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000) + * INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) + * power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) + * power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) + * power.d1002 USING power.meters 
TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) + * power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) + * power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) + * power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) + * power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000) */ private static String getSQL() { StringBuilder sb = new StringBuilder("INSERT INTO "); diff --git a/docs-examples/java/src/main/java/com/taos/example/RestQueryExample.java b/docs-examples/java/src/main/java/com/taos/example/RestQueryExample.java index b1a1d224c6d9af2b83ac039726dcdb49a33ec2b0..a3581a1f4733e8bf3e3f561bb6cab5a725d8a1c0 100644 --- a/docs-examples/java/src/main/java/com/taos/example/RestQueryExample.java +++ b/docs-examples/java/src/main/java/com/taos/example/RestQueryExample.java @@ -51,5 +51,5 @@ public class RestQueryExample { // possible output: // avg(voltage) location -// 222.0 Beijing.Haidian -// 219.0 Beijing.Chaoyang +// 222.0 California.LosAngeles +// 219.0 California.SanFrancisco diff --git a/docs-examples/java/src/main/java/com/taos/example/StmtInsertExample.java b/docs-examples/java/src/main/java/com/taos/example/StmtInsertExample.java index 2a7ccebf41cae1a22d7516966e2c6ffb10011b64..bbcc92b22f67c31384b0fb7a082975eaac2ff2bc 100644 --- a/docs-examples/java/src/main/java/com/taos/example/StmtInsertExample.java +++ b/docs-examples/java/src/main/java/com/taos/example/StmtInsertExample.java @@ -30,14 +30,14 @@ public class StmtInsertExample { private static List<String> getRawData() { return Arrays.asList( - "d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,Beijing.Chaoyang,2", - "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,Beijing.Chaoyang,2", - "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,Beijing.Chaoyang,2", - "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,Beijing.Chaoyang,3", - "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,Beijing.Haidian,2", - "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,Beijing.Haidian,2", - "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,Beijing.Haidian,3", - "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,Beijing.Haidian,3" + "d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,California.SanFrancisco,2", + "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,California.SanFrancisco,2", + "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,California.SanFrancisco,2", + "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,California.SanFrancisco,3", + "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,California.LosAngeles,2", + "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,California.LosAngeles,2", + "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,California.LosAngeles,3", + "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,California.LosAngeles,3" ); } diff --git a/docs-examples/java/src/main/java/com/taos/example/TelnetLineProtocolExample.java b/docs-examples/java/src/main/java/com/taos/example/TelnetLineProtocolExample.java index 1431eccf16dabaac20f60ae7e971ef49707ba509..4c9368288df74f829121aeab5b925d1d083d29f0 100644 --- a/docs-examples/java/src/main/java/com/taos/example/TelnetLineProtocolExample.java +++ b/docs-examples/java/src/main/java/com/taos/example/TelnetLineProtocolExample.java @@ -11,14 +11,14 @@ import java.sql.Statement;
public class TelnetLineProtocolExample { // format: <metric> <timestamp> <value> <tagk_1>=<tagv_1>[ <tagk_n>=<tagv_n>] - private static String[] lines = { "meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3", - "meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3", + private static String[] lines = { "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2", + "meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2", + "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3", + "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3", }; private static Connection getConnection() throws SQLException { diff --git a/docs-examples/java/src/test/java/com/taos/test/TestAll.java b/docs-examples/java/src/test/java/com/taos/test/TestAll.java index 92fe14a49d5f5ea5d7ea5f1d809867b3de0cc9d2..42db24485afec05298159f7b0c3a4e15835d98ed 100644 --- a/docs-examples/java/src/test/java/com/taos/test/TestAll.java +++ b/docs-examples/java/src/test/java/com/taos/test/TestAll.java @@ -23,16 +23,16 @@ public class TestAll { String jdbcUrl = "jdbc:TAOS://localhost:6030?user=root&password=taosdata"; try (Connection conn = DriverManager.getConnection(jdbcUrl)) { try (Statement stmt = conn.createStatement()) { - String sql = "INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000)\n" + - " power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 15:38:15.000',12.60000,218,0.33000)\n" + - " power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 15:38:16.800',12.30000,221,0.31000)\n" + - " power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES('2018-10-03 15:38:16.650',10.30000,218,0.25000)\n" + - " power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 15:38:05.500',11.80000,221,0.28000)\n" + - " power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 15:38:16.600',13.40000,223,0.29000)\n" + - " power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 15:38:05.000',10.80000,223,0.29000)\n" + - " power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 15:38:06.000',10.80000,223,0.29000)\n" + - " power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 15:38:07.000',10.80000,223,0.29000)\n" + - " power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 15:38:08.500',11.50000,221,0.35000)"; + String sql = "INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000)\n" + + " power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 15:38:15.000',12.60000,218,0.33000)\n" + + " power.d1001 USING
power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 15:38:16.800',12.30000,221,0.31000)\n" + + " power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 15:38:16.650',10.30000,218,0.25000)\n" + + " power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 15:38:05.500',11.80000,221,0.28000)\n" + + " power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 15:38:16.600',13.40000,223,0.29000)\n" + + " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:05.000',10.80000,223,0.29000)\n" + + " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:06.000',10.80000,223,0.29000)\n" + + " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:07.000',10.80000,223,0.29000)\n" + + " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:08.500',11.50000,221,0.35000)"; stmt.execute(sql); } diff --git a/docs-examples/node/nativeexample/influxdb_line_example.js b/docs-examples/node/nativeexample/influxdb_line_example.js index a9fc6d11df0b335b92bb3292baaa017cb4bc42ea..2050bee54506a3ee6fe7d89de97b3b41334dd4a6 100644 --- a/docs-examples/node/nativeexample/influxdb_line_example.js +++ b/docs-examples/node/nativeexample/influxdb_line_example.js @@ -13,10 +13,10 @@ function createDatabase() { function insertData() { const lines = [ - "meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249", - "meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250", - "meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249", - "meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250", + "meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249", + "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250", + "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249", + "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250", ]; cursor.schemalessInsert( lines, diff --git a/docs-examples/node/nativeexample/insert_example.js b/docs-examples/node/nativeexample/insert_example.js index 85a353f889176655654d8c39c9a905054d3b6622..ade9d83158362cbf00a856b43a973de31def7601 100644 --- a/docs-examples/node/nativeexample/insert_example.js +++ b/docs-examples/node/nativeexample/insert_example.js @@ -11,10 +11,10 @@ try { cursor.execute( "CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)" ); - var sql = `INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) -power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) -power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) -power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)`; + var sql = `INSERT INTO power.d1001 USING power.meters 
TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) +power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) +power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) +power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)`; cursor.execute(sql); } finally { cursor.close(); diff --git a/docs-examples/node/nativeexample/multi_bind_example.js b/docs-examples/node/nativeexample/multi_bind_example.js index d52581ec8e10c6edfbc8fc8f7ca78512b5c93d74..6ef8b30c097393fef8c6a2837f8683c736b363f1 100644 --- a/docs-examples/node/nativeexample/multi_bind_example.js +++ b/docs-examples/node/nativeexample/multi_bind_example.js @@ -25,7 +25,7 @@ function insertData() { // bind table name and tags let tagBind = new taos.TaosBind(2); - tagBind.bindBinary("Beijing.Chaoyang"); + tagBind.bindBinary("California.SanFrancisco"); tagBind.bindInt(2); cursor.stmtSetTbnameTags("d1001", tagBind.getBind()); diff --git a/docs-examples/node/nativeexample/opentsdb_json_example.js b/docs-examples/node/nativeexample/opentsdb_json_example.js index 6d436a8e9ebe0230bba22064e8fb6c180c14b5d1..2d78444a3f805bc77ab5e11925a28dd18fe221fe 100644 --- a/docs-examples/node/nativeexample/opentsdb_json_example.js +++ b/docs-examples/node/nativeexample/opentsdb_json_example.js @@ -17,25 +17,25 @@ function insertData() { metric: "meters.current", timestamp: 1648432611249, value: 10.3, - tags: { location: "Beijing.Chaoyang", groupid: 2 }, + tags: { location: "California.SanFrancisco", groupid: 2 }, }, { metric: "meters.voltage", timestamp: 1648432611249, value: 219, - tags: { location: "Beijing.Haidian", groupid: 1 }, + tags: { location: "California.LosAngeles", groupid: 1 }, }, { metric: "meters.current", timestamp: 1648432611250, value: 12.6, - tags: { location: "Beijing.Chaoyang", groupid: 2 }, + tags: { location: "California.SanFrancisco", groupid: 2 }, }, { metric: "meters.voltage", timestamp: 1648432611250, value: 221, - tags: { location: "Beijing.Haidian", groupid: 1 }, + tags: { location: "California.LosAngeles", groupid: 1 }, }, ]; diff --git a/docs-examples/node/nativeexample/opentsdb_telnet_example.js b/docs-examples/node/nativeexample/opentsdb_telnet_example.js index 01e79c2dcacd923cd708d1d228959a628d0ff26a..7f80f558838e18f07ad79e580e7d08638b74e940 100644 --- a/docs-examples/node/nativeexample/opentsdb_telnet_example.js +++ b/docs-examples/node/nativeexample/opentsdb_telnet_example.js @@ -13,14 +13,14 @@ function createDatabase() { function insertData() { const lines = [ - "meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3", - "meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3", + "meters.current 1648432611249 10.3 
location=California.SanFrancisco groupid=2", + "meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2", + "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3", + "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3", ]; cursor.schemalessInsert( lines, diff --git a/docs-examples/node/nativeexample/param_bind_example.js b/docs-examples/node/nativeexample/param_bind_example.js index 9117f46c3eeabd9009b72fa9d4a8503e65884242..c7e04c71a0d19ff8666f3d43fe09109009741266 100644 --- a/docs-examples/node/nativeexample/param_bind_example.js +++ b/docs-examples/node/nativeexample/param_bind_example.js @@ -24,7 +24,7 @@ function insertData() { // bind table name and tags let tagBind = new taos.TaosBind(2); - tagBind.bindBinary("Beijing.Chaoyang"); + tagBind.bindBinary("California.SanFrancisco"); tagBind.bindInt(2); cursor.stmtSetTbnameTags("d1001", tagBind.getBind()); diff --git a/docs-examples/php/connect.php b/docs-examples/php/connect.php index 5af77b9768e5c5ac4b774b433479a4ac8902beda..b825b447805a3923248042d2cdff79c51bdcdbe3 100644 --- a/docs-examples/php/connect.php +++ b/docs-examples/php/connect.php @@ -4,7 +4,7 @@ use TDengine\Connection; use TDengine\Exception\TDengineException; try { - // 实例化 + // instantiate $host = 'localhost'; $port = 6030; $username = 'root'; @@ -12,9 +12,9 @@ try { $dbname = null; $connection = new Connection($host, $port, $username, $password, $dbname); - // 连接 + // connect $connection->connect(); } catch (TDengineException $e) { - // 连接失败捕获异常 + // throw exception throw $e; } diff --git a/docs-examples/php/insert.php b/docs-examples/php/insert.php index 0d9cfc4843a2ec3e72d0ad128fa4c2650d6b9cf6..6e38fa0c46d31aa0a939d471ccbd255cfa453a16 100644 --- a/docs-examples/php/insert.php +++ b/docs-examples/php/insert.php @@ -4,7 +4,7 @@ use TDengine\Connection; use TDengine\Exception\TDengineException; try { - // 实例化 + // instantiate $host = 'localhost'; $port = 6030; $username = 'root'; @@ -12,22 +12,22 @@ try { $dbname = 'power'; $connection = new Connection($host, $port, $username, $password, $dbname); - // 连接 + // connect $connection->connect(); - // 插入 + // insert $connection->query('CREATE DATABASE if not exists power'); $connection->query('CREATE STABLE if not exists meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)'); $resource = $connection->query(<<<'SQL' - INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) - power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) - power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) - power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000) + INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 
14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) + power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) + power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) + power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000) SQL); - // 影响行数 + // get affected rows var_dump($resource->affectedRows()); } catch (TDengineException $e) { - // 捕获异常 + // throw exception throw $e; } diff --git a/docs-examples/php/insert_stmt.php b/docs-examples/php/insert_stmt.php index 5d4b4809d215d781807c21172982feff2171fe07..99a9a6aef3f69a8880316355e17396e06ca985c9 100644 --- a/docs-examples/php/insert_stmt.php +++ b/docs-examples/php/insert_stmt.php @@ -4,7 +4,7 @@ use TDengine\Connection; use TDengine\Exception\TDengineException; try { - // 实例化 + // instantiate $host = 'localhost'; $port = 6030; $username = 'root'; @@ -12,18 +12,18 @@ try { $dbname = 'power'; $connection = new Connection($host, $port, $username, $password, $dbname); - // 连接 + // connect $connection->connect(); - // 插入 + // insert $connection->query('CREATE DATABASE if not exists power'); $connection->query('CREATE STABLE if not exists meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)'); $stmt = $connection->prepare('INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)'); - // 设置表名和标签 + // set table name and tags $stmt->setTableNameTags('d1001', [ // supports the same format as parameter binding - [TDengine\TSDB_DATA_TYPE_BINARY, 'Beijing.Chaoyang'], + [TDengine\TSDB_DATA_TYPE_BINARY, 'California.SanFrancisco'], [TDengine\TSDB_DATA_TYPE_INT, 2], ]); @@ -41,9 +41,9 @@ try { ]); $resource = $stmt->execute(); - // 影响行数 + // get affected rows var_dump($resource->affectedRows()); } catch (TDengineException $e) { - // 捕获异常 + // throw exception throw $e; } diff --git a/docs-examples/php/query.php b/docs-examples/php/query.php index 4e86a2cec7426887686049977a8647e786ac2744..2607940ea06a70eaa30e4c165c05bd72aa89857c 100644 --- a/docs-examples/php/query.php +++ b/docs-examples/php/query.php @@ -4,7 +4,7 @@ use TDengine\Connection; use TDengine\Exception\TDengineException; try { - // 实例化 + // instantiate $host = 'localhost'; $port = 6030; $username = 'root'; @@ -12,12 +12,12 @@ try { $dbname = 'power'; $connection = new Connection($host, $port, $username, $password, $dbname); - // 连接 + // connect $connection->connect(); $resource = $connection->query('SELECT ts, current FROM meters LIMIT 2'); var_dump($resource->fetch()); } catch (TDengineException $e) { - // 捕获异常 + // throw exception throw $e; } diff --git a/docs-examples/python/bind_param_example.py b/docs-examples/python/bind_param_example.py index 503a2eb5dd91a3516f87a4d3c1c3218cb6505236..6a67434f876f159cf32069a55e9527ca19034640 100644 --- a/docs-examples/python/bind_param_example.py +++ b/docs-examples/python/bind_param_example.py @@ -2,14 +2,14 @@ import taos from datetime import datetime # note: lines have already been sorted by table name -lines = [('d1001', '2018-10-03 14:38:05.000', 10.30000, 219, 0.31000, 'Beijing.Chaoyang', 2), - ('d1001', '2018-10-03 14:38:15.000', 12.60000, 218, 0.33000, 'Beijing.Chaoyang', 2), - ('d1001', '2018-10-03 14:38:16.800', 12.30000, 221, 0.31000,
'Beijing.Chaoyang', 2), - ('d1002', '2018-10-03 14:38:16.650', 10.30000, 218, 0.25000, 'Beijing.Chaoyang', 3), - ('d1003', '2018-10-03 14:38:05.500', 11.80000, 221, 0.28000, 'Beijing.Haidian', 2), - ('d1003', '2018-10-03 14:38:16.600', 13.40000, 223, 0.29000, 'Beijing.Haidian', 2), - ('d1004', '2018-10-03 14:38:05.000', 10.80000, 223, 0.29000, 'Beijing.Haidian', 3), - ('d1004', '2018-10-03 14:38:06.500', 11.50000, 221, 0.35000, 'Beijing.Haidian', 3)] +lines = [('d1001', '2018-10-03 14:38:05.000', 10.30000, 219, 0.31000, 'California.SanFrancisco', 2), + ('d1001', '2018-10-03 14:38:15.000', 12.60000, 218, 0.33000, 'California.SanFrancisco', 2), + ('d1001', '2018-10-03 14:38:16.800', 12.30000, 221, 0.31000, 'California.SanFrancisco', 2), + ('d1002', '2018-10-03 14:38:16.650', 10.30000, 218, 0.25000, 'California.SanFrancisco', 3), + ('d1003', '2018-10-03 14:38:05.500', 11.80000, 221, 0.28000, 'California.LosAngeles', 2), + ('d1003', '2018-10-03 14:38:16.600', 13.40000, 223, 0.29000, 'California.LosAngeles', 2), + ('d1004', '2018-10-03 14:38:05.000', 10.80000, 223, 0.29000, 'California.LosAngeles', 3), + ('d1004', '2018-10-03 14:38:06.500', 11.50000, 221, 0.35000, 'California.LosAngeles', 3)] def get_ts(ts: str): diff --git a/docs-examples/python/conn_native_pandas.py b/docs-examples/python/conn_native_pandas.py index 314759f7662c7bf4c9df2c8b3396ad3101c91cd4..56942ef57085766cd128b03cabb7a357587eab16 100644 --- a/docs-examples/python/conn_native_pandas.py +++ b/docs-examples/python/conn_native_pandas.py @@ -13,7 +13,7 @@ print(df.head(3)) # output: # RangeIndex(start=0, stop=8, step=1) # -# ts current voltage phase location groupid -# 0 2018-10-03 14:38:05.000 10.3 219 0.31 beijing.chaoyang 2 -# 1 2018-10-03 14:38:15.000 12.6 218 0.33 beijing.chaoyang 2 -# 2 2018-10-03 14:38:16.800 12.3 221 0.31 beijing.chaoyang 2 +# ts current ... location groupid +# 0 2018-10-03 14:38:05.500 11.8 ... california.losangeles 2 +# 1 2018-10-03 14:38:16.600 13.4 ... california.losangeles 2 +# 2 2018-10-03 14:38:05.000 10.8 ... california.losangeles 3 diff --git a/docs-examples/python/conn_rest_pandas.py b/docs-examples/python/conn_rest_pandas.py index 143e4275fa4eda685766297e4b90cba3935a574d..0164080cd5a05e72dce40b1d111ea423623ff9b2 100644 --- a/docs-examples/python/conn_rest_pandas.py +++ b/docs-examples/python/conn_rest_pandas.py @@ -11,9 +11,9 @@ print(type(df.ts[0])) print(df.head(3)) # output: -# # RangeIndex(start=0, stop=8, step=1) -# ts current ... location groupid -# 0 2018-10-03 14:38:05+08:00 10.3 ... beijing.chaoyang 2 -# 1 2018-10-03 14:38:15+08:00 12.6 ... beijing.chaoyang 2 -# 2 2018-10-03 14:38:16.800000+08:00 12.3 ... beijing.chaoyang 2 +# +# ts current ... location groupid +# 0 2018-10-03 06:38:05.500000+00:00 11.8 ... california.losangeles 2 +# 1 2018-10-03 06:38:16.600000+00:00 13.4 ... california.losangeles 2 +# 2 2018-10-03 06:38:05+00:00 10.8 ... 
california.losangeles 3 diff --git a/docs-examples/python/connect_exmaple.py b/docs-examples/python/connect_example.py similarity index 100% rename from docs-examples/python/connect_exmaple.py rename to docs-examples/python/connect_example.py diff --git a/docs-examples/python/connect_rest_examples.py b/docs-examples/python/connect_rest_examples.py index a043d506b965bc31179dbb6f38749d196ab338ff..3303eb0e194ac28e9486ab153183c3b1f0b639f2 100644 --- a/docs-examples/python/connect_rest_examples.py +++ b/docs-examples/python/connect_rest_examples.py @@ -16,10 +16,10 @@ cursor.execute("CREATE DATABASE power") cursor.execute("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)") # insert data -cursor.execute("""INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) - power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) - power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) - power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)""") +cursor.execute("""INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) + power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) + power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) + power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)""") print("inserted row count:", cursor.rowcount) # query data @@ -38,8 +38,7 @@ for row in data: # inserted row count: 8 # queried row count: 3 # ['ts', 'current', 'voltage', 'phase', 'location', 'groupid'] -# [datetime.datetime(2018, 10, 3, 14, 38, 5, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 10.3, 219, 0.31, 'beijing.chaoyang', 2] -# [datetime.datetime(2018, 10, 3, 14, 38, 15, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 12.6, 218, 0.33, 'beijing.chaoyang', 2] -# [datetime.datetime(2018, 10, 3, 14, 38, 16, 800000, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 12.3, 221, 0.31, 'beijing.chaoyang', 2] - +# [datetime.datetime(2018, 10, 3, 14, 38, 5, 500000, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 11.8, 221, 0.28, 'california.losangeles', 2] +# [datetime.datetime(2018, 10, 3, 14, 38, 16, 600000, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 13.4, 223, 0.29, 'california.losangeles', 2] +# [datetime.datetime(2018, 10, 3, 14, 38, 5, tzinfo=datetime.timezone(datetime.timedelta(seconds=28800), '+08:00')), 10.8, 223, 0.29, 'california.losangeles', 3] # ANCHOR_END: basic diff --git a/docs-examples/python/json_protocol_example.py b/docs-examples/python/json_protocol_example.py index 
5bb4d629bccf3d79e74b381d6259de86d6522315..58b38f3ff667bcbbd902434d3409441a4d2c5b45 100644 --- a/docs-examples/python/json_protocol_example.py +++ b/docs-examples/python/json_protocol_example.py @@ -3,12 +3,12 @@ import json import taos from taos import SmlProtocol, SmlPrecision -lines = [{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "Beijing.Chaoyang", "groupid": 2}}, +lines = [{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, - "tags": {"location": "Beijing.Haidian", "groupid": 1}}, + "tags": {"location": "California.LosAngeles", "groupid": 1}}, {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, - "tags": {"location": "Beijing.Chaoyang", "groupid": 2}}, - {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "Beijing.Haidian", "groupid": 1}}] + "tags": {"location": "California.SanFrancisco", "groupid": 2}}, + {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "California.LosAngeles", "groupid": 1}}] def get_connection(): diff --git a/docs-examples/python/line_protocol_example.py b/docs-examples/python/line_protocol_example.py index 02baeb2104f9f48984b4d34afb5e67af641d4e32..735e8e7eb8aed1a8133de7a6de50bd50d076c472 100644 --- a/docs-examples/python/line_protocol_example.py +++ b/docs-examples/python/line_protocol_example.py @@ -1,10 +1,10 @@ import taos from taos import SmlProtocol, SmlPrecision -lines = ["meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000", - "meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500", - "meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249300", - "meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611249800", +lines = ["meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000", + "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500", + "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249300", + "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611249800", ] diff --git a/docs-examples/python/multi_bind_example.py b/docs-examples/python/multi_bind_example.py index 1714121d72705ab8d619a41f3463af4aa3193871..205ba69fb267ae1781415e4f0995b41f908ceb17 100644 --- a/docs-examples/python/multi_bind_example.py +++ b/docs-examples/python/multi_bind_example.py @@ -3,10 +3,10 @@ from datetime import datetime # ANCHOR: bind_batch table_tags = { - "d1001": ('Beijing.Chaoyang', 2), - "d1002": ('Beijing.Chaoyang', 3), - "d1003": ('Beijing.Haidian', 2), - "d1004": ('Beijing.Haidian', 3) + "d1001": ('California.SanFrancisco', 2), + "d1002": ('California.SanFrancisco', 3), + "d1003": ('California.LosAngeles', 2), + "d1004": ('California.LosAngeles', 3) } table_values = { diff --git a/docs-examples/python/native_insert_example.py b/docs-examples/python/native_insert_example.py index 94d4888a8f5330b9e39d5ae051fcb68f9825505f..3b6b73cb2236c8d9d11019349f99f79135a5c1d6 100644 --- a/docs-examples/python/native_insert_example.py +++ b/docs-examples/python/native_insert_example.py @@ -1,13 +1,13 @@ import taos -lines = ["d1001,2018-10-03 
14:38:05.000,10.30000,219,0.31000,Beijing.Chaoyang,2", - "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,Beijing.Haidian,3", - "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,Beijing.Haidian,2", - "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,Beijing.Haidian,3", - "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,Beijing.Chaoyang,3", - "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,Beijing.Chaoyang,2", - "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,Beijing.Chaoyang,2", - "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,Beijing.Haidian,2"] +lines = ["d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,California.SanFrancisco,2", + "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,California.LosAngeles,3", + "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,California.LosAngeles,2", + "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,California.LosAngeles,3", + "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,California.SanFrancisco,3", + "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,California.SanFrancisco,2", + "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,California.SanFrancisco,2", + "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,California.LosAngeles,2"] def get_connection() -> taos.TaosConnection: @@ -25,10 +25,10 @@ def create_stable(conn: taos.TaosConnection): # The generated SQL is: -# INSERT INTO d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) -# d1002 USING meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) -# d1003 USING meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) -# d1004 USING meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000) +# INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) +# d1002 USING meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) +# d1003 USING meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) +# d1004 USING meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000) def get_sql(): global lines diff --git a/docs-examples/python/query_example.py b/docs-examples/python/query_example.py index 6d33c49c968d9210b475931b5d8cecca0ceff3e3..8afd7f07358d7e9c9a3677ee04f8eb92aae6856b 100644 --- a/docs-examples/python/query_example.py +++ b/docs-examples/python/query_example.py @@ -12,10 +12,10 @@ def query_api_demo(conn: taos.TaosConnection): # field count: 7 -# meta of files[1]: {name: ts, type: 9, bytes: 8} +# meta of fields[1]: {name: ts, type: 9, bytes: 8} # ======================Iterate on result========================= -# ('d1001', datetime.datetime(2018, 10, 3, 14, 38, 5), 10.300000190734863, 219, 0.3100000023841858, 'Beijing.Chaoyang', 2) -# ('d1001', datetime.datetime(2018, 10, 3, 14, 38, 15), 12.600000381469727, 218, 0.33000001311302185, 'Beijing.Chaoyang', 2) +# ('d1003', datetime.datetime(2018, 10, 3, 14, 38, 5, 
500000), 11.800000190734863, 221, 0.2800000011920929, 'california.losangeles', 2) +# ('d1003', datetime.datetime(2018, 10, 3, 14, 38, 16, 600000), 13.399999618530273, 223, 0.28999999165534973, 'california.losangeles', 2) # ANCHOR_END: iter # ANCHOR: fetch_all @@ -29,8 +29,8 @@ def fetch_all_demo(conn: taos.TaosConnection): # row count: 2 # ===============all data=================== -# [{'ts': datetime.datetime(2018, 10, 3, 14, 38, 5), 'current': 10.300000190734863}, -# {'ts': datetime.datetime(2018, 10, 3, 14, 38, 15), 'current': 12.600000381469727}] +# [{'ts': datetime.datetime(2018, 10, 3, 14, 38, 5, 500000), 'current': 11.800000190734863}, +# {'ts': datetime.datetime(2018, 10, 3, 14, 38, 16, 600000), 'current': 13.399999618530273}] # ANCHOR_END: fetch_all if __name__ == '__main__': diff --git a/docs-examples/python/telnet_line_protocol_example.py b/docs-examples/python/telnet_line_protocol_example.py index 072835109ee238940e6fe5880b72b2b04e0157fa..d812e186af86be6811ee7774f10458e46df1f39f 100644 --- a/docs-examples/python/telnet_line_protocol_example.py +++ b/docs-examples/python/telnet_line_protocol_example.py @@ -2,14 +2,14 @@ import taos from taos import SmlProtocol, SmlPrecision # format: <metric> <timestamp> <value> <tagk_1>=<tagv_1>[ <tagk_n>=<tagv_n>] -lines = ["meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3", - "meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3", +lines = ["meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2", + "meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2", + "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3", + "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3", ] diff --git a/docs-examples/rust/nativeexample/examples/stmt_example.rs b/docs-examples/rust/nativeexample/examples/stmt_example.rs index a791a4135984a33dded145e8175d7ade57de8d77..190f8c1ef6d50a8e9c925178c1a9d31c22e3d4df 100644 --- a/docs-examples/rust/nativeexample/examples/stmt_example.rs +++ b/docs-examples/rust/nativeexample/examples/stmt_example.rs @@ -12,7 +12,7 @@ async fn main() -> Result<(), Error> { stmt.set_tbname_tags( "d1001", [ - Field::Binary(BString::from("Beijing.Chaoyang")), + Field::Binary(BString::from("California.SanFrancisco")), Field::Int(2), ], )?; diff --git a/docs-examples/rust/restexample/examples/insert_example.rs b/docs-examples/rust/restexample/examples/insert_example.rs index d7acc98d096fb3cd6bea22d6c5f6f0f5caea50af..9261536f627c297fc707708f88f57eed647dbf3e 100644 --- a/docs-examples/rust/restexample/examples/insert_example.rs +++ b/docs-examples/rust/restexample/examples/insert_example.rs @@ -5,10 +5,10 @@ async fn main() -> Result<(), Error> { let taos = TaosCfg::default().connect().expect("fail to connect");
taos.create_database("power").await?; taos.exec("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)").await?; - let sql = "INSERT INTO power.d1001 USING power.meters TAGS(Beijing.Chaoyang, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) - power.d1002 USING power.meters TAGS(Beijing.Chaoyang, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) - power.d1003 USING power.meters TAGS(Beijing.Haidian, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) - power.d1004 USING power.meters TAGS(Beijing.Haidian, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)"; + let sql = "INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) + power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) + power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) + power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)"; let result = taos.query(sql).await?; println!("{:?}", result); Ok(()) diff --git a/docs-examples/rust/schemalessexample/examples/influxdb_line_example.rs b/docs-examples/rust/schemalessexample/examples/influxdb_line_example.rs index e93888cc83d12f3bec7370a66e8a85d38cec42ad..64d1a3c9ac6037c16e3e1c3be0258e19cce632a0 100644 --- a/docs-examples/rust/schemalessexample/examples/influxdb_line_example.rs +++ b/docs-examples/rust/schemalessexample/examples/influxdb_line_example.rs @@ -5,10 +5,10 @@ fn main() { let taos = TaosCfg::default().connect().expect("fail to connect"); taos.raw_query("CREATE DATABASE test").unwrap(); taos.raw_query("USE test").unwrap(); - let lines = ["meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249", - "meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250", - "meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249", - "meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250"]; + let lines = ["meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249", + "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250", + "meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249", + "meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250"]; let affected_rows = taos .schemaless_insert( &lines, diff --git a/docs-examples/rust/schemalessexample/examples/opentsdb_json_example.rs b/docs-examples/rust/schemalessexample/examples/opentsdb_json_example.rs index 1d66bd1f2b1bcbe82dc3ee3e8e25ea4c521c81f0..e61691596704c8aaf979081429802df6e5aa86f9 100644 --- a/docs-examples/rust/schemalessexample/examples/opentsdb_json_example.rs +++ 
b/docs-examples/rust/schemalessexample/examples/opentsdb_json_example.rs @@ -6,10 +6,10 @@ fn main() { taos.raw_query("CREATE DATABASE test").unwrap(); taos.raw_query("USE test").unwrap(); let lines = [ - r#"[{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "Beijing.Chaoyang", "groupid": 2}}, - {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, "tags": {"location": "Beijing.Haidian", "groupid": 1}}, - {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, "tags": {"location": "Beijing.Chaoyang", "groupid": 2}}, - {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "Beijing.Haidian", "groupid": 1}}]"#, + r#"[{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, + {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, "tags": {"location": "California.LosAngeles", "groupid": 1}}, + {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, + {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "California.LosAngeles", "groupid": 1}}]"#, ]; let affected_rows = taos diff --git a/docs-examples/rust/schemalessexample/examples/opentsdb_telnet_example.rs b/docs-examples/rust/schemalessexample/examples/opentsdb_telnet_example.rs index 18d7500714d9e41b1bebd490199d296ead3dc7c4..c8cab7655a24806e5c7659af80e83da383539c55 100644 --- a/docs-examples/rust/schemalessexample/examples/opentsdb_telnet_example.rs +++ b/docs-examples/rust/schemalessexample/examples/opentsdb_telnet_example.rs @@ -6,14 +6,14 @@ fn main() { taos.raw_query("CREATE DATABASE test").unwrap(); taos.raw_query("USE test").unwrap(); let lines = [ - "meters.current 1648432611249 10.3 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611250 12.6 location=Beijing.Chaoyang groupid=2", - "meters.current 1648432611249 10.8 location=Beijing.Haidian groupid=3", - "meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611249 219 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611250 218 location=Beijing.Chaoyang groupid=2", - "meters.voltage 1648432611249 221 location=Beijing.Haidian groupid=3", - "meters.voltage 1648432611250 217 location=Beijing.Haidian groupid=3", + "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2", + "meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2", + "meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3", + "meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2", + "meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3", + "meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3", ]; let affected_rows = taos .schemaless_insert( diff --git a/examples/JDBC/taosdemo/pom.xml b/examples/JDBC/taosdemo/pom.xml index c50f77f1ed15c1cfe9b722718afd4a9fbfd03bb3..7a7a44a45d5a8c185380e55dd06c0246d0e9063b 100644 --- a/examples/JDBC/taosdemo/pom.xml +++ b/examples/JDBC/taosdemo/pom.xml @@ -10,7 +10,7 @@ Demo project for TDengine - 5.3.19 + 5.3.20 diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh index 
93849dd4ebef00512854b4dfff8b57f4b44f7797..4586409493652d8e6486687b5c2518e1dfb87c06 100755 --- a/packaging/tools/post.sh +++ b/packaging/tools/post.sh @@ -120,6 +120,7 @@ function install_bin() { [ -x ${bin_dir}/taosd ] && ${csudo}ln -s ${bin_dir}/taosd ${bin_link_dir}/taosd || : [ -x ${bin_dir}/taosadapter ] && ${csudo}ln -s ${bin_dir}/taosadapter ${bin_link_dir}/taosadapter || : [ -x ${bin_dir}/taosBenchmark ] && ${csudo}ln -sf ${bin_dir}/taosBenchmark ${bin_link_dir}/taosdemo || : + [ -x ${bin_dir}/taosBenchmark ] && ${csudo}ln -sf ${bin_dir}/taosBenchmark ${bin_link_dir}/taosBenchmark || : [ -x ${bin_dir}/TDinsight.sh ] && ${csudo}ln -sf ${bin_dir}/TDinsight.sh ${bin_link_dir}/TDinsight.sh || : [ -x ${bin_dir}/taosdump ] && ${csudo}ln -s ${bin_dir}/taosdump ${bin_link_dir}/taosdump || : [ -x ${bin_dir}/set_core.sh ] && ${csudo}ln -s ${bin_dir}/set_core.sh ${bin_link_dir}/set_core || : diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh index b129d12840ee6a85807eb5f2e1f1e6d13814a94e..f886d08df0b127c2dcec494b220104e39e4bce13 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -23,6 +23,7 @@ data_link_dir=${installDir}/data log_link_dir=${installDir}/log cfg_link_dir=${installDir}/cfg bin_link_dir="/usr/bin" +local_bin_link_dir="/usr/local/bin" lib_link_dir="/usr/lib" lib64_link_dir="/usr/lib64" inc_link_dir="/usr/include" @@ -78,6 +79,7 @@ function kill_tarbitrator() { ${csudo}kill -9 $pid || : fi } + function clean_bin() { # Remove link ${csudo}rm -f ${bin_link_dir}/${clientName} || : @@ -93,6 +95,11 @@ function clean_bin() { ${csudo}rm -f ${bin_link_dir}/TDinsight.sh || : } +function clean_local_bin() { + ${csudo}rm -f ${local_bin_link_dir}/taosBenchmark || : + ${csudo}rm -f ${local_bin_link_dir}/taosdemo || : +} + function clean_lib() { # Remove link ${csudo}rm -f ${lib_link_dir}/libtaos.* || : @@ -213,6 +220,8 @@ function clean_service() { clean_service # Remove binary file and links clean_bin +# Remove links of local bin +clean_local_bin # Remove header file. clean_header # Remove lib file diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index 2ddd9934d409b698257e1b12f5b36eb2bdb8964c..6c256f244668fa3b264aac262b1e5edb9b9b0453 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -1,6 +1,6 @@ name: tdengine base: core20 -version: '2.4.0.0' +version: '2.7.0.0' icon: snap/gui/t-dengine.svg summary: an open-source big data platform designed and optimized for IoT. 
description: | @@ -69,7 +69,7 @@ parts: - etc/taos/taos.cfg - usr/bin/taosd - usr/bin/taos - - usr/lib/libtaos.so.2.4.0.0 + - usr/lib/libtaos.so.2.7.0.0 - usr/lib/libtaos.so.1 - usr/lib/libtaos.so diff --git a/src/client/src/tscParseLineProtocol.c b/src/client/src/tscParseLineProtocol.c index f3326bdb7b5ccfda29d51c8b2a0be8536632d035..6979465a0f41db6e527f003bd4703f46de0e8523 100644 --- a/src/client/src/tscParseLineProtocol.c +++ b/src/client/src/tscParseLineProtocol.c @@ -1141,29 +1141,64 @@ static int doSmlInsertOneDataPoint(TAOS* taos, TAOS_SML_DATA_POINT* point, SSmlL uint8_t precision = tableMeta->tableInfo.precision; free(tableMeta); - char* sql = malloc(TSDB_MAX_SQL_LEN + 1); - int freeBytes = TSDB_MAX_SQL_LEN; + char* sql = malloc(tsMaxSQLStringLen + 1); + if (sql == NULL) { + tscError("SML:0x%" PRIx64 " failed to allocate memory for sql", info->id); + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + int freeBytes = tsMaxSQLStringLen; int sqlLen = 0; + int retLen; sqlLen += snprintf(sql + sqlLen, freeBytes - sqlLen, "insert into %s(", point->childTableName); for (int col = 0; col < point->fieldNum; ++col) { TAOS_SML_KV* kv = point->fields + col; - sqlLen += snprintf(sql + sqlLen, freeBytes - sqlLen, "%s,", kv->key); + retLen = snprintf(sql + sqlLen, freeBytes - sqlLen, "%s,", kv->key); + if (retLen >= freeBytes - sqlLen) { + tscError("SML:0x%" PRIx64 " no free space for building sql key", info->id); + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + sqlLen += retLen; } --sqlLen; - sqlLen += snprintf(sql + sqlLen, freeBytes - sqlLen, ") values ("); + retLen = snprintf(sql + sqlLen, freeBytes - sqlLen, ") values ("); + if (retLen >= freeBytes - sqlLen) { + tscError("SML:0x%" PRIx64 " no free space for building sql", info->id); + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + sqlLen += retLen; TAOS_SML_KV* tsField = point->fields + 0; int64_t ts = *(int64_t*)(tsField->value); ts = convertTimePrecision(ts, TSDB_TIME_PRECISION_NANO, precision); - sqlLen += snprintf(sql + sqlLen, freeBytes - sqlLen, "%" PRId64 ",", ts); + retLen = snprintf(sql + sqlLen, freeBytes - sqlLen, "%" PRId64 ",", ts); + if (retLen >= freeBytes - sqlLen) { + tscError("SML:0x%" PRIx64 " no free space for building timestamp", info->id); + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + sqlLen += retLen; for (int col = 1; col < point->fieldNum; ++col) { TAOS_SML_KV* kv = point->fields + col; int32_t len = 0; + + if (freeBytes - sqlLen <= kv->length) { + tscError("SML:0x%" PRIx64 " no free space for converToStr", info->id); + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } converToStr(sql + sqlLen, kv->type, kv->value, kv->length, &len); sqlLen += len; - sqlLen += snprintf(sql + sqlLen, freeBytes - sqlLen, ","); + retLen = snprintf(sql + sqlLen, freeBytes - sqlLen, ","); + if (retLen >= freeBytes - sqlLen) { + tscError("SML:0x%" PRIx64 " no free space for building sql comma", info->id); + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + sqlLen += retLen; } --sqlLen; - sqlLen += snprintf(sql + sqlLen, freeBytes - sqlLen, ")"); + retLen = snprintf(sql + sqlLen, freeBytes - sqlLen, ")"); + if (retLen >= freeBytes - sqlLen) { + tscError("SML:0x%" PRIx64 " no free space for building the last right parenthesis", info->id); + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + sqlLen += retLen; sql[sqlLen] = 0; tscDebug("SML:0x%" PRIx64 " insert child table %s of super table %s sql: %s", info->id, diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 71d666e23a10bac219c4cd4ab845b3ae572ac246..d875a056aac614df42b8ed6321d3980b717cdf22 100644 
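The tscParseLineProtocol.c hunk above turns unchecked sqlLen += snprintf(...) accumulation into guarded appends: snprintf returns the length the output would have had, so a return value at or beyond the remaining space means the SQL string was truncated. A minimal standalone sketch of that pattern follows; the helper name append_fmt and the buffer size are illustrative, not taken from the TDengine tree.

#include <stdio.h>

/* Append printf-style output to buf at *len, refusing to truncate.
 * snprintf reports the untruncated length, so ret >= remaining means
 * the text did not fit; the caller aborts, as the hunk above does with
 * TSDB_CODE_TSC_OUT_OF_MEMORY. Returns 0 on success, -1 on overflow. */
static int append_fmt(char *buf, int cap, int *len, const char *fmt, const char *arg) {
  int remaining = cap - *len;
  int ret = snprintf(buf + *len, remaining, fmt, arg);
  if (ret < 0 || ret >= remaining) return -1;
  *len += ret;
  return 0;
}

int main(void) {
  char sql[64];
  int len = 0;
  if (append_fmt(sql, sizeof(sql), &len, "insert into %s(", "d1001") ||
      append_fmt(sql, sizeof(sql), &len, "%s,", "ts")) {
    fprintf(stderr, "no free space for building sql\n");
    return 1;
  }
  sql[--len] = 0; /* drop the trailing comma, mirroring --sqlLen above */
  printf("%s\n", sql);
  return 0;
}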
--- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -368,7 +368,7 @@ static int32_t handlePassword(SSqlCmd* pCmd, SStrToken* pPwd) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } - if (pPwd->n > TSDB_PASS_LEN - 1) { + if (pPwd->n >= TSDB_PASS_LEN) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } @@ -1055,6 +1055,12 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { char buf[TSDB_TABLE_FNAME_LEN]; SStrToken sTblToken; sTblToken.z = buf; + // enterprise check +#ifndef TD_ENTERPRISE + const char* msg = "This feature is not supported in the community version. Please use the enterprise version."; + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg); +#endif + // check if (validateTableName(tbName->z, tbName->n, &sTblToken, &dbIncluded) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), STR_INVALID_TABLE_NAME); @@ -10133,6 +10139,9 @@ static int32_t doLoadAllTableMeta(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNod char fname[TSDB_TABLE_FNAME_LEN] = {0}; tNameExtractFullName(&pTableMetaInfo->name, fname); STableMetaVgroupInfo* p = taosHashGet(pCmd->pTableMetaMap, fname, strnlen(fname, TSDB_TABLE_FNAME_LEN)); + if (p == NULL) { + return TSDB_CODE_TSC_NO_META_CACHED; + } pTableMetaInfo->pTableMeta = tscTableMetaDup(p->pTableMeta); pTableMetaInfo->tableMetaCapacity = tscGetTableMetaSize(pTableMetaInfo->pTableMeta); diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 67c7d2b1e8d112f98a66fb72b61d82c41ee11fe5..e42f73fb327b7a0c85741fd1edbcdc21845c5488 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -1964,6 +1964,11 @@ int tscProcessRetrieveGlobalMergeRsp(SSqlObj *pSql) { // global aggregation may be the upstream for parent query SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd); + if (tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) { + pQueryInfo->limit.limit = pQueryInfo->clauseLimit; + pQueryInfo->limit.offset = pQueryInfo->prjOffset; + } + if (pQueryInfo->pQInfo == NULL) { STableGroupInfo tableGroupInfo = {.numOfTables = 1, .pGroupList = taosArrayInit(1, POINTER_BYTES),}; diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 849fe0ba73c20c16db40a593a38648566ca1265d..7280efe1fafe37aef368526376bc53bb0e2364fb 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -64,7 +64,7 @@ static SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pa } SRpcCorEpSet corMgmtEpSet; - char secretEncrypt[TSDB_PASS_LEN] = {0}; + char secretEncrypt[32] = {0}; int secretEncryptLen = 0; if (auth == NULL) { if (!validPassword(pass)) { @@ -211,7 +211,9 @@ TAOS *taos_connect_internal(const char *ip, const char *user, const char *pass, } else { printf("taos connect failed, reason: %s.\n\n", tstrerror(terrno)); } + int32_t lastError = terrno; taos_free_result(pSql); + if (terrno != lastError) terrno = lastError; taos_close(pObj); return NULL; } diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml index 27d9c3b97c2d100b8997eccb94ba7935c2e1c72f..b5db1d781d6fb85e88c99a3d05865a2558f68c05 100644 --- a/src/connector/jdbc/pom.xml +++ b/src/connector/jdbc/pom.xml @@ -45,7 +45,7 @@ com.alibaba fastjson - 1.2.79 + 1.2.83 com.google.guava diff --git a/src/connector/odbc/CMakeLists.txt b/src/connector/odbc/CMakeLists.txt index 5e8095c7ddfa577767eb6ca03624c0c7278e174d..3f0439dd961f663b6d7f03da448d7911420091fb 100644 --- a/src/connector/odbc/CMakeLists.txt +++ b/src/connector/odbc/CMakeLists.txt @@ -58,7 +58,7 @@ 
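The taos_connect_internal change above captures terrno before taos_free_result runs, because a cleanup call can overwrite the thread-local error code before the caller gets to report it. Below is a small sketch of the same save-restore idiom using plain errno; the function names and the simulated clobber are hypothetical, for illustration only.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical cleanup that, like taos_free_result, may clobber the
 * thread-local error code as a side effect of its own work. */
static void cleanup(void *handle) {
  free(handle);
  errno = 0; /* simulate the clobber */
}

static int do_connect(void) {
  void *res = malloc(16);
  errno = ECONNREFUSED; /* pretend the connection attempt failed     */
  int saved = errno;    /* capture the failure reason before cleanup */
  cleanup(res);
  errno = saved;        /* restore so the caller sees the real error */
  return -1;
}

int main(void) {
  if (do_connect() < 0) perror("connect"); /* prints the saved reason */
  return 0;
}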
macro(check_odbc_requirement) check_symbol_exists(SQLExecute "sql.h" HAVE_ODBC_DEV) IF (NOT HAVE_ODBC_DEV) - message(WARNING "odbc requirement not satisfied, check detail in ${CMAKE_BINARY_DIR}/CMakeFiles/CMakeError.log") + message(WARNING "odbc requirement not satisfied, please install unixodbc-dev. Check detail in ${CMAKE_BINARY_DIR}/CMakeFiles/CMakeError.log") return () ENDIF () diff --git a/src/connector/odbc/examples/c/CMakeLists.txt b/src/connector/odbc/examples/c/CMakeLists.txt index 42985496554adf5023a5078f7ec7650413a18bb4..999f0d19f7473d3af4acb42e1fcb4e9a0e9c2c64 100644 --- a/src/connector/odbc/examples/c/CMakeLists.txt +++ b/src/connector/odbc/examples/c/CMakeLists.txt @@ -4,7 +4,11 @@ ADD_EXECUTABLE(tcodbc main.c ../../src/todbc_log.c) IF (TD_LINUX OR TD_DARWIN) target_include_directories(tcodbc PRIVATE ${ODBC_INCLUDE_DIRECTORY}) - target_link_directories(tcodbc PUBLIC ${ODBC_LIBRARY_DIRECTORY}) + IF (CMAKE_VERSION VERSION_LESS 3.13) + link_directories(AFTER ${ODBC_LIBRARY_DIRECTORY}) + ELSE () + target_link_directories(tcodbc PUBLIC ${ODBC_LIBRARY_DIRECTORY}) + ENDIF () TARGET_LINK_LIBRARIES(tcodbc taos odbc) ENDIF () diff --git a/src/connector/odbc/src/CMakeLists.txt b/src/connector/odbc/src/CMakeLists.txt index f0df94795ac35f2b2e9537b7d437c34e2af193e2..a616c7df48b2fb3f70e0b109d81b71e040ae004d 100644 --- a/src/connector/odbc/src/CMakeLists.txt +++ b/src/connector/odbc/src/CMakeLists.txt @@ -19,7 +19,11 @@ IF (TD_LINUX_64) SET_TARGET_PROPERTIES(todbc PROPERTIES VERSION ${TD_VER_NUMBER} SOVERSION 1) TARGET_LINK_LIBRARIES(todbc todbc_base taos odbcinst) target_include_directories(todbc PRIVATE . ${ODBC_INCLUDE_DIRECTORY}) - target_link_directories(todbc PUBLIC ${ODBC_LIBRARY_DIRECTORY}) + IF (CMAKE_VERSION VERSION_LESS 3.13) + link_directories(AFTER ${ODBC_LIBRARY_DIRECTORY}) + ELSE () + target_link_directories(todbc PUBLIC ${ODBC_LIBRARY_DIRECTORY}) + ENDIF () install(CODE "execute_process(COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/install.sh ${CMAKE_BINARY_DIR})") ENDIF () diff --git a/src/connector/odbc/tools/CMakeLists.txt b/src/connector/odbc/tools/CMakeLists.txt index d49d334ef5019786a557263d0c04a7e71c9e715f..790620dd1d72db0ecc42f95a6ac569a6a821818e 100644 --- a/src/connector/odbc/tools/CMakeLists.txt +++ b/src/connector/odbc/tools/CMakeLists.txt @@ -5,7 +5,11 @@ ADD_EXECUTABLE(tconv tconv.c) IF (TD_LINUX OR TD_DARWIN) target_include_directories(todbcinst PRIVATE . 
${ODBC_INCLUDE_DIRECTORY} ${ODBCINST_INCLUDE_DIRECTORY}) - target_link_directories(todbcinst PUBLIC ${ODBC_LIBRARY_DIRECTORY} ${ODBCINST_LIBRARY_DIRECTORY}) + IF (CMAKE_VERSION VERSION_LESS 3.13) + link_directories(AFTER ${ODBC_LIBRARY_DIRECTORY}) + ELSE () + target_link_directories(todbcinst PUBLIC ${ODBC_LIBRARY_DIRECTORY} ${ODBCINST_LIBRARY_DIRECTORY}) + ENDIF () TARGET_LINK_LIBRARIES(todbcinst odbc odbcinst) ENDIF () diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index 8f262252b05e0539ceed0b31b8598e249f9a68c6..6254299fc4783343b2c202cd4251adf8aecd9db0 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -86,9 +86,9 @@ extern const int32_t TYPE_BYTES[16]; #define TSDB_DEFAULT_USER "root" #define TSDB_DEFAULT_PASS "taosdata" -#define TSDB_PASS_LEN 129 +#define TSDB_PASS_LEN 16 -#define SHELL_MAX_PASSWORD_LEN TSDB_PASS_LEN +#define SHELL_MAX_PASSWORD_LEN 20 #define TSDB_TRUE 1 #define TSDB_FALSE 0 #define TSDB_OK 0 diff --git a/src/kit/shell/inc/shell.h b/src/kit/shell/inc/shell.h index 7b22f89351a1247abebcd1b33cb8e2d394967dba..4eb8b3edfbd1f0bdc3520ed410df348e81206aa9 100644 --- a/src/kit/shell/inc/shell.h +++ b/src/kit/shell/inc/shell.h @@ -15,11 +15,12 @@ #ifndef __SHELL__ #define __SHELL__ - +#if !(defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32)) +#include <netinet/in.h> +#endif #include "stdbool.h" #include "taos.h" #include "taosdef.h" -#include "stdbool.h" #include "tsclient.h" #define MAX_USERNAME_SIZE 64 @@ -43,6 +44,11 @@ typedef struct SShellArguments { char* auth; char* database; char* timezone; + bool restful; + char* token; + int socket; + struct sockaddr_in serv_addr; + TAOS* con; bool is_raw_time; bool is_use_passwd; bool dump_config; @@ -59,9 +65,11 @@ typedef struct SShellArguments { char* netTestRole; } SShellArguments; +typedef enum WS_ACTION_TYPE_S { WS_CONN, WS_QUERY, WS_FETCH, WS_FETCH_BLOCK } WS_ACTION_TYPE; + /**************** Function declarations ****************/ extern void shellParseArgument(int argc, char* argv[], SShellArguments* arguments); -extern TAOS* shellInit(SShellArguments* args); +extern void shellInit(SShellArguments* args); extern void* shellLoopQuery(void* arg); extern void taos_error(TAOS_RES* tres, int64_t st); extern int regex_match(const char* s, const char* reg, int cflags); @@ -76,10 +84,15 @@ void shellCheck(TAOS* con, SShellArguments* args); void get_history_path(char* history); void shellCheck(TAOS* con, SShellArguments* args); void cleanup_handler(void* arg); +int convertHostToServAddr(); +void encode_base_64(char* base64_buf, char* user, char* password); void exitShell(); int shellDumpResult(TAOS_RES* con, char* fname, int* error_no, bool printMode); -void shellGetGrantInfo(void *con); -int isCommentLine(char *line); +void shellGetGrantInfo(void* con); +int isCommentLine(char* line); +int wsclient_handshake(); +int wsclient_conn(); +void wsclient_query(char* command); /**************** Global variable declarations ****************/ extern char PROMPT_HEADER[]; diff --git a/src/kit/shell/src/shellDarwin.c b/src/kit/shell/src/shellDarwin.c index 70f372682e924875e8e8cb06ee2f7fc46dd732b6..97cca273f78b9f2b76e695a6a042a56c09b9a750 100644 --- a/src/kit/shell/src/shellDarwin.c +++ b/src/kit/shell/src/shellDarwin.c @@ -89,7 +89,7 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { || (strncmp(argv[i], "--password", 10) == 0)) { printf("Enter password: "); taosSetConsoleEcho(false); - if (scanf("%128s", g_password) > 1) { + if (scanf("%19s", g_password) != 1) { fprintf(stderr, "password read error\n"); }
taosSetConsoleEcho(true); diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index 75fd46246364089c7066e8b2b378e665c0d945e8..c3163f94306db892350375270cb930e5f03f78e2 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -26,6 +26,7 @@ #include "taoserror.h" #include "tglobal.h" #include "tsclient.h" +#include "cJSON.h" #include @@ -35,7 +36,8 @@ char CLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Vers char PROMPT_HEADER[] = "taos> "; char CONTINUE_PROMPT[] = " -> "; int prompt_size = 6; - +const char *BASE64_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; +const char hex[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'}; int64_t result = 0; SShellHistory history; @@ -43,10 +45,13 @@ SShellHistory history; extern int32_t tsMaxBinaryDisplayWidth; extern TAOS *taos_connect_auth(const char *ip, const char *user, const char *auth, const char *db, uint16_t port); +static int calcColWidth(TAOS_FIELD *field, int precision); +static void printHeader(TAOS_FIELD *fields, int *width, int num_fields); + /* * FUNCTION: Initialize the shell. */ -TAOS *shellInit(SShellArguments *_args) { +void shellInit(SShellArguments *_args) { printf("\n"); if (!_args->is_use_passwd) { #ifdef WINDOWS @@ -59,11 +64,6 @@ TAOS *shellInit(SShellArguments *_args) { fflush(stdout); - // set options before initializing - if (_args->timezone != NULL) { - taos_options(TSDB_OPTION_TIMEZONE, _args->timezone); - } - if (!_args->is_use_passwd) { _args->password = TSDB_DEFAULT_PASS; } @@ -72,59 +72,84 @@ TAOS *shellInit(SShellArguments *_args) { _args->user = TSDB_DEFAULT_USER; } - if (taos_init()) { - printf("failed to init taos\n"); - fflush(stdout); - return NULL; - } - - // Connect to the database. - TAOS *con = NULL; - if (_args->auth == NULL) { - con = taos_connect(_args->host, _args->user, _args->password, _args->database, _args->port); + if (_args->restful) { + _args->database = calloc(1, 128); + _args->socket = socket(AF_INET, SOCK_STREAM, 0); + if (_args->socket < 0) { + fprintf(stderr, "failed to create socket"); + exit(EXIT_FAILURE); + } + int retConn = connect(_args->socket, (struct sockaddr *)&(_args->serv_addr), sizeof(struct sockaddr)); + if (retConn < 0) { + fprintf(stderr, "failed to connect"); + close(_args->socket); + exit(EXIT_FAILURE); + } + if (wsclient_handshake()) { + exit(EXIT_FAILURE); + } + if (wsclient_conn()) { + exit(EXIT_FAILURE); + } } else { - con = taos_connect_auth(_args->host, _args->user, _args->auth, _args->database, _args->port); - } - - if (con == NULL) { - fflush(stdout); - return con; - } - - /* Read history TODO : release resources here*/ - read_history(); + // set options before initializing + if (_args->timezone != NULL) { + taos_options(TSDB_OPTION_TIMEZONE, _args->timezone); + } - // Check if it is temperory run - if (_args->commands != NULL || _args->file[0] != 0) { - if (_args->commands != NULL) { - printf("%s%s\n", PROMPT_HEADER, _args->commands); - shellRunCommand(con, _args->commands); + if (taos_init()) { + printf("failed to init taos\n"); + fflush(stdout); + exit(EXIT_FAILURE); } - if (_args->file[0] != 0) { - source_file(con, _args->file); + // Connect to the database. 
+ if (_args->auth == NULL) { + _args->con = taos_connect(_args->host, _args->user, _args->password, _args->database, _args->port); + } else { + _args->con = taos_connect_auth(_args->host, _args->user, _args->auth, _args->database, _args->port); } - taos_close(con); - write_history(); - exit(EXIT_SUCCESS); + if (_args->con == NULL) { + fflush(stdout); + exit(EXIT_FAILURE); + } } + /* Read history TODO : release resources here*/ + read_history(); + + // Check if it is a temporary run + if (_args->commands != NULL || _args->file[0] != 0) { + if (_args->commands != NULL) { + printf("%s%s\n", PROMPT_HEADER, _args->commands); + shellRunCommand(_args->con, _args->commands); + } + + if (_args->file[0] != 0) { + source_file(_args->con, _args->file); + } + + taos_close(_args->con); + write_history(); + exit(EXIT_SUCCESS); + } + #ifndef WINDOWS - if (_args->dir[0] != 0) { - source_dir(con, _args); - taos_close(con); - exit(EXIT_SUCCESS); - } + if (_args->dir[0] != 0) { + source_dir(_args->con, _args); + taos_close(_args->con); + exit(EXIT_SUCCESS); + } - if (_args->check != 0) { - shellCheck(con, _args); - taos_close(con); - exit(EXIT_SUCCESS); - } + if (_args->check != 0) { + shellCheck(_args->con, _args); + taos_close(_args->con); + exit(EXIT_SUCCESS); + } #endif - - return con; + + return; } static bool isEmptyCommand(const char* cmd) { @@ -145,7 +170,11 @@ static int32_t shellRunSingleCommand(TAOS *con, char *command) { // Analyse the command. if (regex_match(command, "^[ \t]*(quit|q|exit)[ \t;]*$", REG_EXTENDED | REG_ICASE)) { - taos_close(con); + if (args.restful) { + close(args.socket); + } else { + taos_close(con); + } write_history(); #ifdef WINDOWS exit(EXIT_SUCCESS); @@ -272,6 +301,11 @@ void shellRunCommandOnServer(TAOS *con, char command[]) { printMode = true; // When output to a file, the switch does not work.
} + if (args.restful) { + wsclient_query(command); + return; + } + st = taosGetTimestampUs(); TAOS_RES* pSql = taos_query_h(con, command, &result); @@ -1065,3 +1099,499 @@ void source_file(TAOS *con, char *fptr) { void shellGetGrantInfo(void *con) { return; } + +void _base64_encode_triple(unsigned char triple[3], char res[4]) { + int tripleValue, i; + + tripleValue = triple[0]; + tripleValue *= 256; + tripleValue += triple[1]; + tripleValue *= 256; + tripleValue += triple[2]; + + for (i = 0; i < 4; i++) { + res[3 - i] = BASE64_CHARS[tripleValue % 64]; + tripleValue /= 64; + } +} + +int taos_base64_encode(unsigned char *source, size_t sourcelen, char *target, size_t targetlen) { + /* check if the result will fit in the target buffer */ + if ((sourcelen + 2) / 3 * 4 > targetlen - 1) return 0; + + /* encode all full triples */ + while (sourcelen >= 3) { + _base64_encode_triple(source, target); + sourcelen -= 3; + source += 3; + target += 4; + } + + /* encode the last one or two characters */ + if (sourcelen > 0) { + unsigned char temp[3]; + memset(temp, 0, sizeof(temp)); + memcpy(temp, source, sourcelen); + _base64_encode_triple(temp, target); + target[3] = '='; + if (sourcelen == 1) target[2] = '='; + + target += 4; + } + + /* terminate the string */ + target[0] = 0; + + return 1; +} + +int convertHostToServAddr() { + if (args.port == 0) { + args.port = 6041; + } + if (NULL == args.host) { + args.host = "127.0.0.1"; + } + struct hostent *server = gethostbyname(args.host); + if ((server == NULL) || (server->h_addr == NULL)) { + fprintf(stderr, "no such host: %s\n", args.host); + return -1; + } + memset(&(args.serv_addr), 0, sizeof(struct sockaddr_in)); + args.serv_addr.sin_family = AF_INET; + args.serv_addr.sin_port = htons(args.port); +#ifdef WINDOWS + args.serv_addr.sin_addr.s_addr = inet_addr(args.host); +#else + memcpy(&(args.serv_addr.sin_addr.s_addr), server->h_addr, server->h_length); +#endif + + return 0; +} + +int wsclient_handshake() { + char request_header[1024]; + char recv_buf[1024]; + unsigned char key_nonce[16]; + char websocket_key[256]; + memset(request_header, 0, 1024); + srand(time(NULL)); + int i; + for (i = 0; i < 16; i++) { + key_nonce[i] = rand() & 0xff; + } + taos_base64_encode(key_nonce, 16, websocket_key, 256); + if (args.token) { + snprintf(request_header, 1024, + "GET /rest/ws?token=%s HTTP/1.1\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nHost: " + "%s:%d\r\nSec-WebSocket-Key: " + "%s\r\nSec-WebSocket-Version: 13\r\n\r\n", + args.token, args.host, args.port, websocket_key); + } else { + snprintf(request_header, 1024, + "GET /rest/ws HTTP/1.1\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nHost: %s:%d\r\nSec-WebSocket-Key: " + "%s\r\nSec-WebSocket-Version: 13\r\n\r\n", + args.host, args.port, websocket_key); + } + + ssize_t n = send(args.socket, request_header, strlen(request_header), 0); + if (n <= 0) { + fprintf(stderr, "web socket handshake error\n"); + return -1; + } + n = recv(args.socket, recv_buf, 1023, 0); + if (n <= 0) { + fprintf(stderr, "web socket handshake error\n"); + return -1; + } + recv_buf[n] = 0; + if (NULL == strstr(recv_buf, "HTTP/1.1 101")) { + fprintf(stderr, "web socket handshake failed: %s\n", recv_buf); + return -1; + } + return 0; +} + +int wsclient_send(char *strdata) { + struct timeval tv; + unsigned char mask[4]; + unsigned int mask_int; + unsigned long long payload_len; + unsigned char finNopcode; + unsigned int payload_len_small; + unsigned int payload_offset = 6; + unsigned int len_size; + // unsigned long long be_payload_len; + unsigned int sent = 0; + int i; + unsigned int frame_size; + char *data; + gettimeofday(&tv, NULL); + srand(tv.tv_usec 
* tv.tv_sec); + mask_int = rand(); + memcpy(mask, &mask_int, 4); + payload_len = strlen(strdata); + finNopcode = 0x81; + if (payload_len <= 125) { + frame_size = 6 + payload_len; + payload_len_small = payload_len; + } else if (payload_len > 125 && payload_len <= 0xffff) { + frame_size = 8 + payload_len; + payload_len_small = 126; + payload_offset += 2; + } else if (payload_len > 0xffff && payload_len <= 0xffffffffffffffffLL) { + frame_size = 14 + payload_len; + payload_len_small = 127; + payload_offset += 8; + } else { + fprintf(stderr, "websocket send too large data\n"); + return -1; + } + data = (char *)malloc(frame_size); + memset(data, 0, frame_size); + *data = finNopcode; + *(data + 1) = payload_len_small | 0x80; + if (payload_len_small == 126) { + payload_len &= 0xffff; + len_size = 2; + for (i = 0; i < len_size; i++) { + *(data + 2 + i) = *((char *)&payload_len + (len_size - i - 1)); + } + } + if (payload_len_small == 127) { + payload_len &= 0xffffffffffffffffLL; + len_size = 8; + for (i = 0; i < len_size; i++) { + *(data + 2 + i) = *((char *)&payload_len + (len_size - i - 1)); + } + } + for (i = 0; i < 4; i++) *(data + (payload_offset - 4) + i) = mask[i]; + + memcpy(data + payload_offset, strdata, strlen(strdata)); + for (i = 0; i < strlen(strdata); i++) *(data + payload_offset + i) ^= mask[i % 4] & 0xff; + sent = 0; + i = 0; + while (sent < frame_size && i >= 0) { + i = send(args.socket, data + sent, frame_size - sent, 0); + sent += i; + } + if (i < 0) { + fprintf(stderr, "websocket send data error\n"); + } + free(data); + return 0; +} + +int wsclient_send_sql(char *command, WS_ACTION_TYPE type, int id) { + cJSON *json = cJSON_CreateObject(); + cJSON *_args = cJSON_CreateObject(); + cJSON_AddNumberToObject(_args, "req_id", 1); + switch (type) { + case WS_CONN: + cJSON_AddStringToObject(json, "action", "conn"); + cJSON_AddStringToObject(_args, "user", "root"); + cJSON_AddStringToObject(_args, "password", "taosdata"); + cJSON_AddStringToObject(_args, "db", ""); + + break; + case WS_QUERY: + cJSON_AddStringToObject(json, "action", "query"); + cJSON_AddStringToObject(_args, "sql", command); + break; + case WS_FETCH: + cJSON_AddStringToObject(json, "action", "fetch"); + cJSON_AddNumberToObject(_args, "id", id); + break; + case WS_FETCH_BLOCK: + cJSON_AddStringToObject(json, "action", "fetch_block"); + cJSON_AddNumberToObject(_args, "id", id); + break; + } + cJSON_AddItemToObject(json, "args", _args); + char *strdata = NULL; + strdata = cJSON_Print(json); + cJSON_Delete(json); + if (wsclient_send(strdata)) { + free(strdata); + return -1; + } + free(strdata); + return 0; +} + +int wsclient_conn() { + if (wsclient_send_sql(NULL, WS_CONN, 0)) { + return -1; + } + char recv_buffer[1024]; + memset(recv_buffer, 0, 1024); + recv(args.socket, recv_buffer, 1023, 0); + char *received_json = strstr(recv_buffer, "{"); + cJSON *root = cJSON_Parse(received_json); + if (root == NULL) { + fprintf(stderr, "fail to parse response into json: %s\n", recv_buffer); + return -1; + } + + cJSON *code = cJSON_GetObjectItem(root, "code"); + if (!cJSON_IsNumber(code)) { + fprintf(stderr, "wrong code key in json: %s\n", received_json); + cJSON_Delete(root); + return -1; + } + if (code->valueint == 0) { + cJSON_Delete(root); + return 0; + } else { + cJSON *message = cJSON_GetObjectItem(root, "message"); + if (!cJSON_IsString(message)) { + fprintf(stderr, "wrong message key in json: %s\n", received_json); + cJSON_Delete(root); + return -1; + } + fprintf(stderr, "failed to connect, reason: %s\n", message->valuestring); + } + cJSON_Delete(root); + return -1; +} + 
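wsclient_send above hand-assembles an RFC 6455 client frame: byte 0 carries FIN plus the text opcode (0x81), byte 1 carries the mask bit (0x80) and a 7-bit length (with 2- or 8-byte extended lengths for larger payloads), then a 4-byte mask that every payload byte is XORed with, since client-to-server frames must be masked. The following is a simplified sketch for the short-payload case only; it illustrates the framing and is not the shell's implementation.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Build a masked RFC 6455 text frame for payloads of at most 125 bytes:
 * 2-byte header, 4-byte mask, then payload XORed with mask[i % 4]. Returns
 * the frame length to pass to send(), or 0 if the payload needs an extended
 * length field (the 126/127 branches handled by wsclient_send above). */
static size_t ws_text_frame_small(const char *payload, unsigned char *out) {
  size_t len = strlen(payload);
  if (len > 125) return 0;
  unsigned char mask[4];
  for (int i = 0; i < 4; i++) mask[i] = (unsigned char)(rand() & 0xff);
  out[0] = 0x81;                      /* FIN = 1, opcode 0x1 (text)     */
  out[1] = 0x80 | (unsigned char)len; /* MASK = 1, 7-bit payload length */
  memcpy(out + 2, mask, 4);
  for (size_t i = 0; i < len; i++)
    out[6 + i] = (unsigned char)payload[i] ^ mask[i % 4];
  return 6 + len;
}

int main(void) {
  unsigned char frame[131]; /* 6-byte header + 125-byte payload maximum */
  size_t n = ws_text_frame_small("{\"action\":\"conn\"}", frame);
  printf("frame size: %zu bytes\n", n);
  return 0;
}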
+cJSON *wsclient_parse_response() { + char *recv_buffer = calloc(1, 4096); + int start = 0; + bool found = false; + int received = 0; + int bytes; + int recv_length = 4095; + do { + bytes = recv(args.socket, recv_buffer + received, recv_length - received, 0); + if (bytes <= 0) { + fprintf(stderr, "websocket recv failed with bytes: %d\n", bytes); + free(recv_buffer); + return NULL; + } + + if (!found) { + for (; start < received + bytes; start++) { + if ((recv_buffer + start)[0] == '{') { + found = true; + break; + } + } + } + if (NULL != strstr(recv_buffer + start, "}")) { + break; + } + received += bytes; + if (received >= recv_length) { + recv_length += 4096; + recv_buffer = realloc(recv_buffer, recv_length); + memset(recv_buffer + received, 0, recv_length - received); + } + } while (1); + cJSON *res = cJSON_Parse(recv_buffer + start); + if (res == NULL) { + fprintf(stderr, "fail to parse response into json: %s\n", recv_buffer + start); + free(recv_buffer); + return NULL; + } + free(recv_buffer); + return res; +} + +TAOS_FIELD *wsclient_print_header(cJSON *query, int *pcols, int *pprecison) { + TAOS_FIELD *fields = NULL; + cJSON *fields_count = cJSON_GetObjectItem(query, "fields_count"); + if (cJSON_IsNumber(fields_count)) { + *pcols = (int)fields_count->valueint; + fields = calloc((int)fields_count->valueint, sizeof(TAOS_FIELD)); + cJSON *fields_names = cJSON_GetObjectItem(query, "fields_names"); + cJSON *fields_types = cJSON_GetObjectItem(query, "fields_types"); + cJSON *fields_lengths = cJSON_GetObjectItem(query, "fields_lengths"); + if (cJSON_IsArray(fields_names) && cJSON_IsArray(fields_types) && cJSON_IsArray(fields_lengths)) { + for (int i = 0; i < (int)fields_count->valueint; i++) { + strncpy(fields[i].name, cJSON_GetArrayItem(fields_names, i)->valuestring, 65); + fields[i].type = (uint8_t)cJSON_GetArrayItem(fields_types, i)->valueint; + fields[i].bytes = (int16_t)cJSON_GetArrayItem(fields_lengths, i)->valueint; + } + cJSON *precision = cJSON_GetObjectItem(query, "precision"); + if (cJSON_IsNumber(precision)) { + *pprecison = (int)precision->valueint; + int width[TSDB_MAX_COLUMNS]; + for (int col = 0; col < (int)fields_count->valueint; col++) { + width[col] = calcColWidth(fields + col, (int)precision->valueint); + } + printHeader(fields, width, (int)fields_count->valueint); + return fields; + } else { + fprintf(stderr, "Invalid precision key in json\n"); + } + } else { + fprintf(stderr, "Invalid fields_names/fields_types/fields_lengths key in json\n"); + } + } else { + fprintf(stderr, "Invalid fields_count key in json\n"); + } + if (fields != NULL) { + free(fields); + } + return NULL; +} + +int wsclient_check(cJSON *root, int64_t st, int64_t et) { + cJSON *code = cJSON_GetObjectItem(root, "code"); + if (cJSON_IsNumber(code)) { + if (code->valueint == 0) { + return 0; + } else { + cJSON *message = cJSON_GetObjectItem(root, "message"); + if (cJSON_IsString(message)) { + fprintf(stderr, "\nDB error: %s (%.6fs)\n", message->valuestring, (et - st) / 1E6); + } else { + fprintf(stderr, "Invalid message key in json\n"); + } + } + } else { + fprintf(stderr, "Invalid code key in json\n"); + } + return -1; +} + +int wsclient_print_data(int rows, TAOS_FIELD *fields, int cols, int64_t id, int precision, int* pshowed_rows) { + char *recv_buffer = calloc(1, 4096); + int col_length = 0; + for (int i = 0; i < cols; i++) { + col_length += fields[i].bytes; + } + int total_recv_len = col_length * rows + 12; + int received = 0; + int recv_length = 4095; + int start = 0; + int pos; + do { + int bytes = recv(args.socket, recv_buffer + received, recv_length - received, 0); + received += 
bytes; + if (received >= recv_length) { + recv_length += 4096; + recv_buffer = realloc(recv_buffer, recv_length); + } + } while (received < total_recv_len); + + while (1) { + if (*(int64_t *)(recv_buffer + start) == id) { + break; + } + start++; + } + start += 8; + int width[TSDB_MAX_COLUMNS]; + for (int c = 0; c < cols; c++) { + width[c] = calcColWidth(fields + c, precision); + } + for (int i = 0; i < rows; i++) { + if (*pshowed_rows == DEFAULT_RES_SHOW_NUM) { + free(recv_buffer); + return 0; + } + for (int c = 0; c < cols; c++) { + pos = start; + pos += i * fields[c].bytes; + for (int j = 0; j < c; j++) { + pos += fields[j].bytes * rows; + } + putchar(' '); + int16_t length = 0; + if (fields[c].type == TSDB_DATA_TYPE_NCHAR || fields[c].type == TSDB_DATA_TYPE_BINARY || + fields[c].type == TSDB_DATA_TYPE_JSON) { + length = *(int16_t *)(recv_buffer + pos); + pos += 2; + } + printField((const char *)(recv_buffer + pos), fields + c, width[c], (int32_t)length, precision); + putchar(' '); + putchar('|'); + } + putchar('\n'); + *pshowed_rows += 1; + } + free(recv_buffer); + return 0; +} + +void wsclient_query(char *command) { + int64_t st, et; + st = taosGetTimestampUs(); + if (wsclient_send_sql(command, WS_QUERY, 0)) { + return; + } + + et = taosGetTimestampUs(); + cJSON *query = wsclient_parse_response(); + if (query == NULL) { + return; + } + + if (wsclient_check(query, st, et)) { + return; + } + cJSON *is_update = cJSON_GetObjectItem(query, "is_update"); + if (cJSON_IsBool(is_update)) { + if (is_update->valueint) { + cJSON *affected_rows = cJSON_GetObjectItem(query, "affected_rows"); + if (cJSON_IsNumber(affected_rows)) { + printf("Update OK, %d row(s) in set (%.6fs)\n\n", (int)affected_rows->valueint, (et - st) / 1E6); + } else { + fprintf(stderr, "Invalid affected_rows key in json\n"); + } + } else { + int cols = 0; + int precision = 0; + int64_t total_rows = 0; + int showed_rows = 0; + TAOS_FIELD *fields = wsclient_print_header(query, &cols, &precision); + if (fields != NULL) { + cJSON *id = cJSON_GetObjectItem(query, "id"); + if (cJSON_IsNumber(id)) { + bool completed = false; + while (!completed) { + if (wsclient_send_sql(NULL, WS_FETCH, (int)id->valueint) == 0) { + cJSON *fetch = wsclient_parse_response(); + if (fetch != NULL) { + if (wsclient_check(fetch, st, et) == 0) { + cJSON *_completed = cJSON_GetObjectItem(fetch, "completed"); + if (cJSON_IsBool(_completed)) { + if (_completed->valueint) { + completed = true; + continue; + } + cJSON *rows = cJSON_GetObjectItem(fetch, "rows"); + if (cJSON_IsNumber(rows)) { + total_rows += rows->valueint; + cJSON *lengths = cJSON_GetObjectItem(fetch, "lengths"); + if (cJSON_IsArray(lengths)) { + for (int i = 0; i < cols; i++) { + fields[i].bytes = (int16_t)(cJSON_GetArrayItem(lengths, i)->valueint); + } + if (showed_rows < DEFAULT_RES_SHOW_NUM) { + if (wsclient_send_sql(NULL, WS_FETCH_BLOCK, (int)id->valueint) == 0) { + wsclient_print_data((int)rows->valueint, fields, cols, id->valueint, precision, &showed_rows); + } + } + continue; + } else { + fprintf(stderr, "Invalid lengths key in json\n"); + } + } else { + fprintf(stderr, "Invalid rows key in json\n"); + } + } else { + fprintf(stderr, "Invalid completed key in json\n"); + } + } + } + } + fprintf(stderr, "error occurred in fetch/fetch_block ws actions\n"); + break; + } + if (showed_rows == DEFAULT_RES_SHOW_NUM) { + printf("\n"); + printf(" Notice: The result shows only the first %d rows.\n", DEFAULT_RES_SHOW_NUM); + printf("\n"); + } + printf("Query OK, %" PRId64 " row(s) in set (%.6fs)\n\n", total_rows, (et - st) / 1E6); + } else { + 
fprintf(stderr, "Invalid id key in json\n"); + } + free(fields); + } + } + } else { + fprintf(stderr, "Invalid is_update key in json\n"); + } + cJSON_Delete(query); + return; +} diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c index 7a8e2d6e54feb160b377895caef608bc96f90407..e529d829e97a4329d4ebe2eb3fc2082d718d163f 100644 --- a/src/kit/shell/src/shellLinux.c +++ b/src/kit/shell/src/shellLinux.c @@ -52,6 +52,8 @@ static struct argp_option options[] = { {"pktlen", 'l', "PKTLEN", 0, "Packet length used for net test, default is 1000 bytes."}, {"pktnum", 'N', "PKTNUM", 0, "Packet numbers used for net test, default is 100."}, {"pkttype", 'S', "PKTTYPE", 0, "Choose packet type used for net test, default is TCP. Only speed test could be either TCP or UDP."}, + {"restful", 'R', 0, 0, "Connect and interact with TDengine use restful"}, + {"token", 't', "TOKEN", 0, "The token to use when connecting TDengine's cloud services"}, {0}}; static error_t parse_opt(int key, char *arg, struct argp_state *state) { @@ -162,6 +164,12 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { case OPT_ABORT: arguments->abort = 1; break; + case 't': + arguments->token = arg; + break; + case 'R': + arguments->restful = true; + break; default: return ARGP_ERR_UNKNOWN; } @@ -186,7 +194,7 @@ static void parse_args( || (strncmp(argv[i], "--password", 10) == 0)) { printf("Enter password: "); taosSetConsoleEcho(false); - if (scanf("%128s", g_password) > 1) { + if (scanf("%s", g_password) > 1) { fprintf(stderr, "password reading error\n"); } taosSetConsoleEcho(true); diff --git a/src/kit/shell/src/shellMain.c b/src/kit/shell/src/shellMain.c index afed5d2d2ffa680852c1155334499975cd58cfea..425b25ca80f64fdb73a4ed34f7a41a6167007725 100644 --- a/src/kit/shell/src/shellMain.c +++ b/src/kit/shell/src/shellMain.c @@ -44,7 +44,6 @@ void *cancelHandler(void *arg) { exit(0); #endif } - return NULL; } @@ -69,14 +68,15 @@ int checkVersion() { } // Global configurations -SShellArguments args = { - .host = NULL, +SShellArguments args = {.host = NULL, #ifndef TD_WINDOWS .password = NULL, #endif .user = NULL, .database = NULL, .timezone = NULL, + .restful = false, + .token = NULL, .is_raw_time = false, .is_use_passwd = false, .dump_config = false, @@ -87,8 +87,7 @@ SShellArguments args = { .pktLen = 1000, .pktNum = 100, .pktType = "TCP", - .netTestRole = NULL -}; + .netTestRole = NULL}; /* * Main function. @@ -127,12 +126,15 @@ int main(int argc, char* argv[]) { exit(0); } - /* Initialize the shell */ - TAOS* con = shellInit(&args); - if (con == NULL) { - exit(EXIT_FAILURE); + if (args.restful) { + if (convertHostToServAddr()) { + exit(EXIT_FAILURE); + } } + /* Initialize the shell */ + shellInit(&args); + if (tsem_init(&cancelSem, 0, 0) != 0) { printf("failed to create cancel semphore\n"); exit(EXIT_FAILURE); @@ -148,11 +150,11 @@ int main(int argc, char* argv[]) { taosSetSignal(SIGABRT, shellQueryInterruptHandler); /* Get grant information */ - shellGetGrantInfo(con); + shellGetGrantInfo(args.con); /* Loop to query the input. 
*/ while (1) { - pthread_create(&pid, NULL, shellLoopQuery, con); + pthread_create(&pid, NULL, shellLoopQuery, args.con); pthread_join(pid, NULL); } } diff --git a/src/kit/shell/src/shellWindows.c b/src/kit/shell/src/shellWindows.c index dd2f85a3d3480479006ddd7b8ebe78073186e8a5..f3f6c2b2e6ddd67b08114958e062ec6c18a3d272 100644 --- a/src/kit/shell/src/shellWindows.c +++ b/src/kit/shell/src/shellWindows.c @@ -93,7 +93,7 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { || (strncmp(argv[i], "--password", 10) == 0)) { printf("Enter password: "); taosSetConsoleEcho(false); - if (scanf("%128s", g_password) > 1) { + if (scanf("%19s", g_password) != 1) { fprintf(stderr, "password read error!\n"); } taosSetConsoleEcho(true); diff --git a/src/kit/taos-tools b/src/kit/taos-tools index 3b6bf41d16de351668fc02589f931da383d8a9fe..4d83d8c62973506f760bcaa3a33f4665ed9046d0 160000 --- a/src/kit/taos-tools +++ b/src/kit/taos-tools @@ -1 +1 @@ -Subproject commit 3b6bf41d16de351668fc02589f931da383d8a9fe +Subproject commit 4d83d8c62973506f760bcaa3a33f4665ed9046d0 diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c index 80b8be7d833def37090f5fd54f7c73c2c6768b5c..37732cbbb394f0d473495cc7f46f71e8438be78c 100644 --- a/src/mnode/src/mnodeDnode.c +++ b/src/mnode/src/mnodeDnode.c @@ -548,7 +548,7 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) { pStatus->numOfCores = htons(pStatus->numOfCores); uint32_t _version = htonl(pStatus->version); - if (_version != tsVersion >> 8) { + if ((_version >> 16) != (tsVersion >> 24)) { pDnode = mnodeGetDnodeByEp(pStatus->dnodeEp); if (pDnode != NULL && pDnode->status != TAOS_DN_STATUS_READY) { pDnode->offlineReason = TAOS_DN_OFF_VERSION_NOT_MATCH; diff --git a/src/os/inc/osDir.h b/src/os/inc/osDir.h index 7afe1264512bbffd34d9278ffb37034a473b827f..899b99a182aeb09cadbe3560f0976c885e609a20 100644 --- a/src/os/inc/osDir.h +++ b/src/os/inc/osDir.h @@ -22,6 +22,7 @@ extern "C" { void taosRemoveDir(char *rootDir); bool taosDirExist(const char* dirname); +int32_t taosMkdirP(const char *pathname, int keepBase); int32_t taosMkDir(const char *pathname, mode_t mode); void taosRemoveOldLogFiles(char *rootDir, int32_t keepDays); int32_t taosRename(char *oldName, char *newName); diff --git a/src/os/src/detail/osDir.c b/src/os/src/detail/osDir.c index c467c64872d4b660af5cebb19b017f1528b55055..17c844ed863c227fe1178b7d99fee4a300a0b3e2 100644 --- a/src/os/src/detail/osDir.c +++ b/src/os/src/detail/osDir.c @@ -49,8 +49,37 @@ bool taosDirExist(const char* dirname) { return access(dirname, F_OK) == 0; } -int taosMkDir(const char *path, mode_t mode) { - int code = mkdir(path, 0755); +int32_t taosMkdirP(const char *dir, int keepLast) { + char tmp[256]; + char *p = NULL; + size_t len; + size_t i; + + snprintf(tmp, sizeof(tmp),"%s",dir); + len = strlen(tmp); + if (!keepLast) { + for (i = len - 1; i > 0; --i) + if (tmp[i] == '/') { + tmp[i] = 0; + break; + } + } + + for (p = tmp + 1; *p; p++) + if (*p == '/') { + *p = 0; + if (mkdir(tmp, S_IRWXU) && errno != EEXIST) + return -1; + *p = '/'; + } + if (mkdir(tmp, S_IRWXU) && errno != EEXIST) + return -1; + + return 0; +} + +int32_t taosMkDir(const char *path, mode_t mode) { + int code = mkdir(path, mode); if (code < 0 && errno == EEXIST) code = 0; return code; } diff --git a/src/plugins/CMakeLists.txt b/src/plugins/CMakeLists.txt index a91903630cbe75675f4c985f0906e6830d3cf2ff..aeb7f538ce3c81dc619a124fe31bebd2902ea357 100644 --- a/src/plugins/CMakeLists.txt +++ b/src/plugins/CMakeLists.txt @@ -60,8 +60,8 @@ 
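The taosMkdirP helper added in osDir.c above is the classic mkdir -p walk: create each path component in turn and treat EEXIST as success, with the second argument selecting whether the final component is itself created (walInitObj passes 1) or treated as a file name and stripped (tsdbCopyDFile passes 0). A standalone rendering of the same walk, for illustration only; the demo path in main is hypothetical.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>

/* mkdir -p: create every missing component of dir. With keepLast = 0 the
 * last component is assumed to name a file and only its parents are made;
 * with keepLast = 1 the whole path is created as directories. EEXIST is
 * treated as success at every step. */
static int mkdir_p(const char *dir, int keepLast) {
  char tmp[256];
  snprintf(tmp, sizeof(tmp), "%s", dir);
  size_t len = strlen(tmp);
  if (len == 0) return -1;
  if (!keepLast) {
    for (size_t i = len - 1; i > 0; --i)
      if (tmp[i] == '/') { tmp[i] = 0; break; }
  }
  for (char *p = tmp + 1; *p; p++) {
    if (*p != '/') continue;
    *p = 0; /* temporarily terminate to create this prefix */
    if (mkdir(tmp, S_IRWXU) && errno != EEXIST) return -1;
    *p = '/';
  }
  return (mkdir(tmp, S_IRWXU) && errno != EEXIST) ? -1 : 0;
}

int main(void) {
  /* creates /tmp/td_demo and /tmp/td_demo/wal; wal0.log is stripped */
  if (mkdir_p("/tmp/td_demo/wal/wal0.log", 0) != 0) perror("mkdir_p");
  return 0;
}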
ELSE () PATCH_COMMAND COMMAND git clean -f -d BUILD_COMMAND - COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../inc CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -ldflags "-s -w -X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}" - COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../inc CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}" + COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../inc CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}" + COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../inc CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}" INSTALL_COMMAND COMMAND curl -sL https://github.com/upx/upx/releases/download/v3.96/upx-3.96-${PLATFORM_ARCH_STR}_linux.tar.xz -o upx.tar.xz && tar -xvJf upx.tar.xz -C ${CMAKE_BINARY_DIR} --strip-components 1 > /dev/null && ${CMAKE_BINARY_DIR}/upx taosadapter || : COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin @@ -82,8 +82,8 @@ ELSE () PATCH_COMMAND COMMAND git clean -f -d BUILD_COMMAND - COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../inc CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -ldflags "-s -w -X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}" - COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../inc CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}" + COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../inc CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}" + COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../inc CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}" INSTALL_COMMAND COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ diff --git a/src/plugins/taosadapter b/src/plugins/taosadapter index f005d7bd83f8509716c07d126f374f1ed2bc59f2..2ca30513fa0de5d682c3ee2afc10d1dcc9a341fb 160000 --- a/src/plugins/taosadapter +++ b/src/plugins/taosadapter @@ -1 +1 @@ -Subproject commit f005d7bd83f8509716c07d126f374f1ed2bc59f2 +Subproject commit 2ca30513fa0de5d682c3ee2afc10d1dcc9a341fb diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index 5701f95f0078ac707d401b2d072972d1853f81e7..42e649be10a9eec75cc1ec610cba86586a0f27c0 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -1856,6 +1856,17 @@ int32_t tsCompare(const void* p1, const void* p2) { } } +int32_t tsCompareDesc(const 
void* p1, const void* p2) { + TSKEY k = *(TSKEY*)p1; + SResPair* pair = (SResPair*)p2; + + if (k == pair->key) { + return 0; + } else { + return k > pair->key? -1:1; + } +} + static void stddev_dst_function(SQLFunctionCtx *pCtx) { SStddevdstInfo *pStd = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); @@ -1877,7 +1888,7 @@ static void stddev_dst_function(SQLFunctionCtx *pCtx) { SResPair* p = taosArrayGet(resList, 0); avg = p->avg; } else { // todo opt performance by using iterator since the timestamp lsit is matched with the output result - SResPair* p = bsearch(&pCtx->startTs, resList->pData, len, sizeof(SResPair), tsCompare); + SResPair* p = bsearch(&pCtx->startTs, resList->pData, len, sizeof(SResPair), pCtx->order == TSDB_ORDER_DESC ? tsCompareDesc : tsCompare); if (p == NULL) { return; } diff --git a/src/query/src/qFill.c b/src/query/src/qFill.c index dbe385e249e19f77786538f344ef6f6485166fda..d83620c78fe0cc87a1fff61b6c58dff9852ecbec 100644 --- a/src/query/src/qFill.c +++ b/src/query/src/qFill.c @@ -69,7 +69,7 @@ static void doFillOneRowResult(SFillInfo* pFillInfo, void** data, char** srcData // set the other values if (pFillInfo->type == TSDB_FILL_PREV) { - char* p = FILL_IS_ASC_FILL(pFillInfo) ? prev : next; + char* p = prev; if (p != NULL) { for (int32_t i = 1; i < pFillInfo->numOfCols; ++i) { @@ -85,7 +85,7 @@ static void doFillOneRowResult(SFillInfo* pFillInfo, void** data, char** srcData setNullValueForRow(pFillInfo, data, pFillInfo->numOfCols, index); } } else if (pFillInfo->type == TSDB_FILL_NEXT) { - char* p = FILL_IS_ASC_FILL(pFillInfo)? next : prev; + char* p = next; if (p != NULL) { for (int32_t i = 1; i < pFillInfo->numOfCols; ++i) { @@ -178,8 +178,6 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, void** data, int32_t outputR if (FILL_IS_ASC_FILL(pFillInfo)) { assert(pFillInfo->currentKey >= pFillInfo->start); - } else { - assert(pFillInfo->currentKey <= pFillInfo->start); } while (pFillInfo->numOfCurrent < outputRows) { @@ -451,6 +449,27 @@ void taosFillSetInputDataBlock(SFillInfo* pFillInfo, const SSDataBlock* pInput) memcpy(pTag->tagVal, pColData->pData, pCol->col.bytes); // TODO not memcpy?? 
} } + + // validate currentKey against the remaining timestamps + if (!FILL_IS_ASC_FILL(pFillInfo)) { + int64_t* tsList = (int64_t*) pFillInfo->pData[0]; + int32_t numOfRows = taosNumOfRemainRows(pFillInfo); + int64_t numOfRes = -1; + if (numOfRows > 0) { + TSKEY lastKey = tsList[pFillInfo->numOfRows - 1]; + numOfRes = taosTimeCountInterval( + lastKey, + pFillInfo->currentKey, + pFillInfo->interval.sliding, + pFillInfo->interval.slidingUnit, + pFillInfo->precision); + numOfRes += 1; + if (numOfRes < numOfRows || pFillInfo->currentKey < lastKey) { + // reset currentKey to the first (largest) remaining timestamp + pFillInfo->currentKey = tsList[0]; + } + } + } } bool taosFillHasMoreResults(SFillInfo* pFillInfo) { @@ -459,8 +478,7 @@ bool taosFillHasMoreResults(SFillInfo* pFillInfo) { return true; } - if (pFillInfo->numOfTotal > 0 && (((pFillInfo->end > pFillInfo->start) && FILL_IS_ASC_FILL(pFillInfo)) || - (pFillInfo->end < pFillInfo->start && !FILL_IS_ASC_FILL(pFillInfo)))) { + if (pFillInfo->numOfTotal > 0 && pFillInfo->end > pFillInfo->start) { return getNumOfResultsAfterFillGap(pFillInfo, pFillInfo->end, 4096) > 0; } diff --git a/src/tsdb/inc/tsdbFile.h b/src/tsdb/inc/tsdbFile.h index 6d1e0cf2461a28dbcf481c7dc93d651551c0453d..75e95631513e354960df5119b25ac3b6620a29d8 100644 --- a/src/tsdb/inc/tsdbFile.h +++ b/src/tsdb/inc/tsdbFile.h @@ -288,8 +288,11 @@ static FORCE_INLINE int64_t tsdbReadDFile(SDFile* pDFile, void* buf, int64_t nby static FORCE_INLINE int tsdbCopyDFile(SDFile* pSrc, SDFile* pDest) { if (tfscopy(TSDB_FILE_F(pSrc), TSDB_FILE_F(pDest)) < 0) { - terrno = TAOS_SYSTEM_ERROR(errno); - return -1; + int32_t ret = taosMkdirP(TSDB_FILE_FULL_NAME(pDest), 0); + if (ret < 0 || tfscopy(TSDB_FILE_F(pSrc), TSDB_FILE_F(pDest)) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + return -1; + } } tsdbSetDFileInfo(pDest, TSDB_FILE_INFO(pSrc)); @@ -401,4 +404,4 @@ static FORCE_INLINE bool tsdbFSetIsOk(SDFileSet* pSet) { return true; } -#endif /* _TS_TSDB_FILE_H_ */ \ No newline at end of file +#endif /* _TS_TSDB_FILE_H_ */ diff --git a/src/wal/src/walMgmt.c b/src/wal/src/walMgmt.c index 05324d31eec56ee74b81c70dc451eadf83d518d2..f50cf4c6df67db23fc3d0e9732b9a0eda53fca2f 100644 --- a/src/wal/src/walMgmt.c +++ b/src/wal/src/walMgmt.c @@ -139,7 +139,7 @@ void walClose(void *handle) { } static int32_t walInitObj(SWal *pWal) { - if (taosMkDir(pWal->path, 0755) != 0) { + if (taosMkdirP(pWal->path, 1) != 0) { wError("vgId:%d, path:%s, failed to create directory since %s", pWal->vgId, pWal->path, strerror(errno)); return TAOS_SYSTEM_ERROR(errno); } diff --git a/tests/develop-test/0-others/taos_shell.py b/tests/develop-test/0-others/taos_shell.py new file mode 100644 index 0000000000000000000000000000000000000000..b1293958c2ec8837b6166d3c1fd6e437392d91f3 --- /dev/null +++ b/tests/develop-test/0-others/taos_shell.py @@ -0,0 +1,97 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import os +import time +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +import json + + +class TDTestCase: + def caseDescription(self): + ''' + [TD-16023]test taos shell with restful interface + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getPath(self, tool="taos"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("%s not found!" % tool) + return + else: + tdLog.info("%s found in %s" %(tool,paths[0])) + return paths[0] + + def shellrun(self, cmd): + command = "%s -R -s \"%s\"" %(self.binPath,cmd) + result = os.popen(command).read().strip() + return result + + def checkresult(self, cmd, expect): + result = self.shellrun(cmd) + if expect not in result: + print(f"{expect} not in {result} with command: {cmd}") + assert False + else: + print(f"pass command: {cmd}") + + def run(self): + binPath = self.getPath() + self.binPath = binPath + self.checkresult("drop database if exists test", "Update OK") + self.checkresult("create database if not exists test", "Update OK") + self.checkresult("create stable test.stb (ts timestamp, c1 nchar(8), c2 double, c3 int) tags (t1 int)", "Update OK") + self.checkresult("create table test.tb1 using test.stb tags (1)", "Update OK") + self.checkresult("create table test.tb2 using test.stb tags (2)", "Update OK") + self.checkresult("select tbname from test.stb", "Query OK, 2 row(s) in set") + self.checkresult("insert into test.tb1 values (now, 'beijing', 1.23, 18)", "Update OK") + self.checkresult("insert into test.tb1 values (now, 'beijing', 1.23, 18)", "Update OK") + self.checkresult("insert into test.tb2 values (now, 'beijing', 1.23, 18)", "Update OK") + self.checkresult("insert into test.tb2 values (now, 'beijing', 1.23, 18)", "Update OK") + self.checkresult("select * from test.stb", "Query OK, 4 row(s) in set") + taosBenchmark = self.getPath(tool="taosBenchmark") + cmd = "%s -n 100 -t 100 -y" %taosBenchmark + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + self.checkresult("select * from test.meters", "Query OK, 10000 row(s) in set") + self.checkresult("select * from test.meters","Notice: The result shows only the first 100 rows") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) + diff --git a/tests/develop-test/3-connectors/java/test.sh b/tests/develop-test/3-connectors/java/test.sh index 9a3e7cbb085d3214dce6c19cdd9aee3bced1dc09..5e56651bcd4ec64e6446d4149bacbda041e4d4c3 100755 --- a/tests/develop-test/3-connectors/java/test.sh +++ b/tests/develop-test/3-connectors/java/test.sh @@ -25,9 +25,9 @@ nohup taosd -c ${taosdConfig} > /dev/null 2>&1 & nohup taosadapter -c 
${adapterConfig} > /dev/null 2>&1 & sleep 10 -cd ../../../../ -WKC=`pwd` -cd ${WKC}/src/connector/jdbc +cd ../../ +git clone git@github.com:taosdata/taos-connector-jdbc.git --branch main --single-branch --depth 1 +cd taos-connector-jdbc mvn clean test > jdbc-out.log 2>&1 tail -n 20 jdbc-out.log diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/auto_create_table_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/auto_create_table_json.py index a0567f3510a95708fb7469ec4549b81bda3aade2..792875af39932fe5da9e47a785dff384a7dabded 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/auto_create_table_json.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/auto_create_table_json.py @@ -28,8 +28,31 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) + def getPath(self, tool="taosBenchmark"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + def run(self): - cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/taosc_auto_create_table.json" + binPath = self.getPath() + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/taosc_auto_create_table.json" % binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") @@ -76,7 +99,7 @@ class TDTestCase: tdSql.query("select distinct(c12) from db.`stb1-2`") tdSql.checkData(0, 0, None) - cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/stmt_auto_create_table.json" + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/stmt_auto_create_table.json" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") @@ -93,7 +116,7 @@ class TDTestCase: tdSql.query("select count(*) from db.`stb2-2`") tdSql.checkData(0, 0, 160) - cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/rest_auto_create_table.json" + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/rest_auto_create_table.json" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") @@ -110,7 +133,7 @@ class TDTestCase: tdSql.query("select count(*) from db.`stb3-2`") tdSql.checkData(0, 0, 160) - cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/sml_auto_create_table.json" + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/sml_auto_create_table.json" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/commandline.py b/tests/develop-test/5-taos-tools/taosbenchmark/commandline.py index ffc7d4860156107663a415b8b156ab91557d68c3..3f2e2c821b0e3e8e28151fb77ef594444d7dcd2d 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/commandline.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/commandline.py @@ -31,8 +31,31 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) + def getPath(self, tool="taosBenchmark"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = 
selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + def run(self): - cmd = "taosBenchmark -F 7 -H 9 -n 10 -t 2 -x -y -M -C -d newtest -l 5 -A binary,nchar\(31\) -b tinyint,binary\(23\),bool,nchar -w 29 -E -m $%^*" + binPath = self.getPath() + cmd = "%s -F 7 -H 9 -n 10 -t 2 -x -y -M -C -d newtest -l 5 -A binary,nchar\(31\) -b tinyint,binary\(23\),bool,nchar -w 29 -E -m $%%^*" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("use newtest") @@ -58,7 +81,7 @@ class TDTestCase: tdSql.checkRows(2) tdSql.execute("drop database if exists newtest") - cmd = "taosBenchmark -F 7 -n 10 -t 2 -y -M -I stmt" + cmd = "%s -F 7 -n 10 -t 2 -y -M -I stmt" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.query("select count(tbname) from test.meters") @@ -66,55 +89,55 @@ class TDTestCase: tdSql.query("select count(*) from test.meters") tdSql.checkData(0, 0, 20) - cmd = "taosBenchmark -n 3 -t 3 -B 2 -i 1 -G -y -T 1 2>&1 | grep sleep | wc -l" + cmd = "%s -n 3 -t 3 -B 2 -i 1 -G -y -T 1 2>&1 | grep sleep | wc -l" %binPath sleepTimes = subprocess.check_output(cmd, shell=True).decode("utf-8") tdLog.info("%s" % cmd) os.system("%s" % cmd) if (int(sleepTimes) != 2): tdLog.exit("expected sleep times 2, actual %d" % int(sleepTimes)) - cmd = "taosBenchmark -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -r 1 2>&1 | grep sleep | wc -l" + cmd = "%s -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -r 1 2>&1 | grep sleep | wc -l" %binPath sleepTimes = subprocess.check_output(cmd, shell=True).decode("utf-8") tdLog.info("%s" % cmd) os.system("%s" % cmd) if (int(sleepTimes) != 3): tdLog.exit("expected sleep times 3, actual %d" % int(sleepTimes)) - cmd = "taosBenchmark -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -I sml 2>&1 | grep sleep | wc -l" + cmd = "%s -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -I sml 2>&1 | grep sleep | wc -l" %binPath sleepTimes = subprocess.check_output(cmd, shell=True).decode("utf-8") tdLog.info("%s" % cmd) os.system("%s" % cmd) if (int(sleepTimes) != 2): tdLog.exit("expected sleep times 2, actual %d" % int(sleepTimes)) - cmd = "taosBenchmark -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -r 1 -I sml 2>&1 | grep sleep | wc -l" + cmd = "%s -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -r 1 -I sml 2>&1 | grep sleep | wc -l" %binPath sleepTimes = subprocess.check_output(cmd, shell=True).decode("utf-8") tdLog.info("%s" % cmd) os.system("%s" % cmd) if (int(sleepTimes) != 3): tdLog.exit("expected sleep times 3, actual %d" % int(sleepTimes)) - cmd = "taosBenchmark -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -I stmt 2>&1 | grep sleep | wc -l" + cmd = "%s -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -I stmt 2>&1 | grep sleep | wc -l" %binPath sleepTimes = subprocess.check_output(cmd, shell=True).decode("utf-8") tdLog.info("%s" % cmd) os.system("%s" % cmd) if (int(sleepTimes) != 2): tdLog.exit("expected sleep times 2, actual %d" % int(sleepTimes)) - cmd = "taosBenchmark -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -r 1 -I stmt 2>&1 | grep sleep | wc -l" + cmd = "%s -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -r 1 -I stmt 2>&1 | grep sleep | wc -l" %binPath sleepTimes = subprocess.check_output(cmd, shell=True).decode("utf-8") tdLog.info("%s" % cmd) os.system("%s" % cmd) if (int(sleepTimes) != 3): tdLog.exit("expected sleep times 3, actual %d" % int(sleepTimes)) - cmd = 
"taosBenchmark -S 17 -n 3 -t 1 -y -x" + cmd = "%s -S 17 -n 3 -t 1 -y -x" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.query("select last(ts) from test.meters") tdSql.checkData(0, 0 , "2017-07-14 10:40:00.034") - cmd = "taosBenchmark -N -I taosc -t 11 -n 11 -y -x -E" + cmd = "%s -N -I taosc -t 11 -n 11 -y -x -E" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("use test") @@ -125,7 +148,7 @@ class TDTestCase: tdSql.query("select count(*) from `d10`") tdSql.checkData(0, 0, 11) - cmd = "taosBenchmark -N -I rest -t 11 -n 11 -y -x" + cmd = "%s -N -I rest -t 11 -n 11 -y -x" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("use test") @@ -136,7 +159,7 @@ class TDTestCase: tdSql.query("select count(*) from d10") tdSql.checkData(0, 0, 11) - cmd = "taosBenchmark -N -I stmt -t 11 -n 11 -y -x" + cmd = "%s -N -I stmt -t 11 -n 11 -y -x" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("use test") @@ -147,134 +170,134 @@ class TDTestCase: tdSql.query("select count(*) from d10") tdSql.checkData(0, 0, 11) - cmd = "taosBenchmark -N -I sml -y" + cmd = "%s -N -I sml -y" %binPath tdLog.info("%s" % cmd) assert(os.system("%s" % cmd) !=0 ) - cmd = "taosBenchmark -n 1 -t 1 -y -b bool" + cmd = "%s -n 1 -t 1 -y -b bool" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") tdSql.query("describe test.meters") tdSql.checkData(1, 1, "BOOL") - cmd = "taosBenchmark -n 1 -t 1 -y -b tinyint" + cmd = "%s -n 1 -t 1 -y -b tinyint" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") tdSql.query("describe test.meters") tdSql.checkData(1, 1, "TINYINT") - cmd = "taosBenchmark -n 1 -t 1 -y -b utinyint" + cmd = "%s -n 1 -t 1 -y -b utinyint" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") tdSql.query("describe test.meters") tdSql.checkData(1, 1, "TINYINT UNSIGNED") - cmd = "taosBenchmark -n 1 -t 1 -y -b smallint" + cmd = "%s -n 1 -t 1 -y -b smallint" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") tdSql.query("describe test.meters") tdSql.checkData(1, 1, "SMALLINT") - cmd = "taosBenchmark -n 1 -t 1 -y -b usmallint" + cmd = "%s -n 1 -t 1 -y -b usmallint" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") tdSql.query("describe test.meters") tdSql.checkData(1, 1, "SMALLINT UNSIGNED") - cmd = "taosBenchmark -n 1 -t 1 -y -b int" + cmd = "%s -n 1 -t 1 -y -b int" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") tdSql.query("describe test.meters") tdSql.checkData(1, 1, "INT") - cmd = "taosBenchmark -n 1 -t 1 -y -b uint" + cmd = "%s -n 1 -t 1 -y -b uint" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") tdSql.query("describe test.meters") tdSql.checkData(1, 1, "INT UNSIGNED") - cmd = "taosBenchmark -n 1 -t 1 -y -b bigint" + cmd = "%s -n 1 -t 1 -y -b bigint" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") tdSql.query("describe test.meters") tdSql.checkData(1, 1, "BIGINT") - cmd = "taosBenchmark -n 1 -t 1 -y -b ubigint" + cmd = "%s -n 1 -t 1 -y -b ubigint" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") tdSql.query("describe test.meters") tdSql.checkData(1, 1, "BIGINT UNSIGNED") - cmd = "taosBenchmark -n 1 -t 1 -y -b timestamp" + cmd = "%s -n 1 -t 1 -y -b timestamp" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) 
tdSql.execute("reset query cache") tdSql.query("describe test.meters") tdSql.checkData(1, 1, "TIMESTAMP") - cmd = "taosBenchmark -n 1 -t 1 -y -b float" + cmd = "%s -n 1 -t 1 -y -b float" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") tdSql.query("describe test.meters") tdSql.checkData(1, 1, "FLOAT") - cmd = "taosBenchmark -n 1 -t 1 -y -b double" + cmd = "%s -n 1 -t 1 -y -b double" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") tdSql.query("describe test.meters") tdSql.checkData(1, 1, "DOUBLE") - cmd = "taosBenchmark -n 1 -t 1 -y -b nchar" + cmd = "%s -n 1 -t 1 -y -b nchar" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") tdSql.query("describe test.meters") tdSql.checkData(1, 1, "NCHAR") - cmd = "taosBenchmark -n 1 -t 1 -y -b nchar\(7\)" + cmd = "%s -n 1 -t 1 -y -b nchar\(7\)" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") tdSql.query("describe test.meters") tdSql.checkData(1, 1, "NCHAR") - cmd = "taosBenchmark -n 1 -t 1 -y -b binary" + cmd = "%s -n 1 -t 1 -y -b binary" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") tdSql.query("describe test.meters") tdSql.checkData(1, 1, "BINARY") - cmd = "taosBenchmark -n 1 -t 1 -y -b binary\(7\)" + cmd = "%s -n 1 -t 1 -y -b binary\(7\)" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") tdSql.query("describe test.meters") tdSql.checkData(1, 1, "BINARY") - cmd = "taosBenchmark -n 1 -t 1 -y -A json\(7\)" + cmd = "%s -n 1 -t 1 -y -A json\(7\)" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") tdSql.query("describe test.meters") tdSql.checkData(4, 1, "JSON") - cmd = "taosBenchmark -n 1 -t 1 -y -b int,x" + cmd = "%s -n 1 -t 1 -y -b int,x" %binPath tdLog.info("%s" % cmd) assert(os.system("%s" % cmd) != 0) - cmd = "taosBenchmark -n 1 -t 1 -y -A int,json" + cmd = "%s -n 1 -t 1 -y -A int,json" %binPath tdLog.info("%s" % cmd) assert(os.system("%s" % cmd) != 0) diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/custom_col_tag.py b/tests/develop-test/5-taos-tools/taosbenchmark/custom_col_tag.py index f6d52df8030427086148300fc2596e1482f086f7..99d51a6fe7dc82ea34b245af7cc6cef192fdb7d3 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/custom_col_tag.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/custom_col_tag.py @@ -28,8 +28,31 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) + def getPath(self, tool="taosBenchmark"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + def run(self): - cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/custom_col_tag.json" + binPath = self.getPath() + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/custom_col_tag.json" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") diff --git 
a/tests/develop-test/5-taos-tools/taosbenchmark/default_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/default_json.py index fd8bde5c1066833f9c2413b434dbc7e467a27b7b..a84fe0627484d594ffd0d32bcc4d4ea581ab66ec 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/default_json.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/default_json.py @@ -28,8 +28,31 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) + def getPath(self, tool="taosBenchmark"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + def run(self): - cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/default.json" + binPath = self.getPath() + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/default.json" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/demo.py b/tests/develop-test/5-taos-tools/taosbenchmark/demo.py index 5d936d894bc8de3f88075de496b448f098eff0ea..2f56815c3160cca32c540032500de9d8f40d45c4 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/demo.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/demo.py @@ -31,8 +31,31 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) + def getPath(self, tool="taosBenchmark"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + def run(self): - cmd = "taosBenchmark -n 100 -t 100 -y" + binPath = self.getPath() + cmd = "%s -n 100 -t 100 -y" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("use test") diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/insert_alltypes_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/insert_alltypes_json.py index 107a48e117b66b28a0de3c4f974ec09005489e76..bcba5d8cc248348b9575fec9ab8f52f5d5fc8182 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/insert_alltypes_json.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/insert_alltypes_json.py @@ -28,8 +28,31 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) + def getPath(self, tool="taosBenchmark"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if 
("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + def run(self): - cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/taosc_insert_alltypes.json" + binPath = self.getPath() + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/taosc_insert_alltypes.json" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.query("select count(*) from db.stb") @@ -124,7 +147,7 @@ class TDTestCase: tdSql.checkData(0, 0, 160) - cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/sml_insert_alltypes.json" + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/sml_insert_alltypes.json" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.query("select count(*) from db.stb") @@ -162,7 +185,7 @@ class TDTestCase: tdSql.checkData(25, 1, "NCHAR") tdSql.checkData(26, 1, "NCHAR") - cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/rest_insert_alltypes.json" + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/rest_insert_alltypes.json" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.query("select count(*) from db.stb") @@ -204,7 +227,7 @@ class TDTestCase: tdSql.checkData(28, 1, "BINARY") tdSql.checkData(28, 2, 19) - cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/stmt_insert_alltypes.json" + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/stmt_insert_alltypes.json" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.query("select count(*) from db.stb") diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/invalid_commandline.py b/tests/develop-test/5-taos-tools/taosbenchmark/invalid_commandline.py index 1ae8a775ae18639165bc1cb0fb85e7c1fabc43ac..31442527076f0ba6884c3e9fb37c553e1770a0da 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/invalid_commandline.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/invalid_commandline.py @@ -28,34 +28,57 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) + def getPath(self, tool="taosBenchmark"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + def run(self): - cmd = "taosBenchmark -F abc -P abc -I abc -T abc -H abc -i abc -S abc -B abc -r abc -t abc -n abc -l abc -w abc -w 16385 -R abc -O abc -a abc -n 2 -t 2 -r 1 -y" + binPath = self.getPath() + cmd = "%s -F abc -P abc -I abc -T abc -H abc -i abc -S abc -B abc -r abc -t abc -n abc -l abc -w abc -w 16385 -R abc -O abc -a abc -n 2 -t 2 -r 1 -y" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.query("select count(*) from test.meters") tdSql.checkData(0, 0, 4) - cmd = "taosBenchmark non_exist_opt" + cmd = "%s non_exist_opt" %binPath tdLog.info("%s" % cmd) assert (os.system("%s" % cmd) != 0) - cmd = "taosBenchmark -f non_exist_file" + cmd = "%s -f non_exist_file" %binPath tdLog.info("%s" % cmd) assert (os.system("%s" % cmd) != 0) - cmd = "taosBenchmark -h non_exist_host" + cmd = "%s -h 
non_exist_host" %binPath tdLog.info("%s" % cmd) assert (os.system("%s" % cmd) != 0) - cmd = "taosBenchmark -p non_exist_pass" + cmd = "%s -p non_exist_pass" %binPath tdLog.info("%s" % cmd) assert (os.system("%s" % cmd) != 0) - cmd = "taosBenchmark -u non_exist_user" + cmd = "%s -u non_exist_user" %binPath tdLog.info("%s" % cmd) assert (os.system("%s" % cmd) != 0) - cmd = "taosBenchmark -c non_exist_dir -n 1 -t 1 -o non_exist_path -y" + cmd = "%s -c non_exist_dir -n 1 -t 1 -o non_exist_path -y" %binPath tdLog.info("%s" % cmd) assert (os.system("%s" % cmd) == 0) diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json_tag.py b/tests/develop-test/5-taos-tools/taosbenchmark/json_tag.py index 5b71f3a065de1708a6dbdf570f77d18db80f3e26..5effc317e962d581d65905700fef89c3d997bcde 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/json_tag.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json_tag.py @@ -28,8 +28,31 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) + def getPath(self, tool="taosBenchmark"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + def run(self): - cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/taosc_json_tag.json" + binPath = self.getPath() + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/taosc_json_tag.json" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/limit_offset_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/limit_offset_json.py index 803e57ecc873b89fdc941ee87f27ce38dde18a1a..b7f3fcd8262d171ee16f4e7b8f0f5ebb7aa84b38 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/limit_offset_json.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/limit_offset_json.py @@ -28,8 +28,31 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) + def getPath(self, tool="taosBenchmark"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + def run(self): - cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/taosc_only_create_table.json" + binPath = self.getPath() + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/taosc_only_create_table.json" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") @@ -48,7 +71,7 @@ class TDTestCase: tdSql.checkData(28, 2, 64) - cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/taosc_limit_offset.json" + cmd = "%s -f 
./5-taos-tools/taosbenchmark/json/taosc_limit_offset.json" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/query_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/query_json.py index 274729fada8f759535ad72979c9d5710390cc67f..00a7de5e0ea4d4b46eca42747477589428ce7998 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/query_json.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/query_json.py @@ -32,7 +32,30 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) + def getPath(self, tool="taosBenchmark"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + def run(self): + binPath = self.getPath() os.system("rm -f rest_query_specified-0 rest_query_super-0 taosc_query_specified-0 taosc_query_super-0") tdSql.execute("drop database if exists db") tdSql.execute("create database if not exists db") @@ -41,7 +64,7 @@ class TDTestCase: tdSql.execute("insert into stb_0 using stb tags (0) values (now, 0)") tdSql.execute("insert into stb_1 using stb tags (1) values (now, 1)") tdSql.execute("insert into stb_2 using stb tags (2) values (now, 2)") - cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/taosc_query.json" + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/taosc_query.json" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) with open("%s" % "taosc_query_specified-0", 'r+') as f1: @@ -54,7 +77,7 @@ class TDTestCase: queryTaosc = line.strip().split()[0] assert queryTaosc == '1', "result is %s != expect: 1" % queryTaosc - cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/rest_query.json" + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/rest_query.json" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/sample_csv_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/sample_csv_json.py index 5be777497930f14fa5d34bda3f54a8722f0e7dbc..3e0277b37d32ba57a26e89accd45d7f166cab455 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/sample_csv_json.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/sample_csv_json.py @@ -28,8 +28,31 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) + def getPath(self, tool="taosBenchmark"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + def run(self): - cmd = "taosBenchmark -f 
./5-taos-tools/taosbenchmark/json/taosc_sample_use_ts.json" + binPath = self.getPath() + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/taosc_sample_use_ts.json" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/sml_interlace.py b/tests/develop-test/5-taos-tools/taosbenchmark/sml_interlace.py index 30ee6e87bdaaa26fad7550b0075939f8ee2f5cb9..842417e1d14ecd6ef73bb1178861fe4e8cd2a55d 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/sml_interlace.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/sml_interlace.py @@ -28,8 +28,31 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) + def getPath(self, tool="taosBenchmark"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + def run(self): - cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/sml_interlace.json" + binPath = self.getPath() + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/sml_interlace.json" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py b/tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py index 6816be6156e39faacbda4c470d99dc347875494e..8d50201aa77028349c37b1ec755cc0d80c8ee7bc 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py @@ -28,8 +28,31 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) + def getPath(self, tool="taosBenchmark"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + def run(self): - cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/sml_json_alltypes.json" + binPath = self.getPath() + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/sml_json_alltypes.json" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/sml_telnet_alltypes.py b/tests/develop-test/5-taos-tools/taosbenchmark/sml_telnet_alltypes.py index 0b96fd37389d61bf370ec54d85a160ad940970ae..e4b417374e51b40c25170efe1e1650f9e1a8d8d8 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/sml_telnet_alltypes.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/sml_telnet_alltypes.py @@ -28,8 +28,31 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) 
tdSql.init(conn.cursor(), logSql) + def getPath(self, tool="taosBenchmark"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + def run(self): - cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/sml_telnet_alltypes.json" + binPath = self.getPath() + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/sml_telnet_alltypes.json" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/subscripe_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/subscripe_json.py index 13ff130a742c94f4a2ba176fcaff796fe0f9f605..b897df70d1e0073708353183cdd55faab41eda92 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/subscripe_json.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/subscripe_json.py @@ -28,7 +28,30 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) + def getPath(self, tool="taosBenchmark"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + def run(self): + binPath = self.getPath() tdSql.execute("drop database if exists db") tdSql.execute("create database if not exists db") tdSql.execute("use db") @@ -36,11 +59,11 @@ class TDTestCase: tdSql.execute("insert into stb_0 using stb tags (0) values (now, 0)") tdSql.execute("insert into stb_1 using stb tags (1) values (now, 1)") tdSql.execute("insert into stb_2 using stb tags (2) values (now, 2)") - cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/specified_subscribe.json -g" + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/specified_subscribe.json -g" %binPath tdLog.info("%s" % cmd) assert os.system("%s" % cmd) == 0 - cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/super_subscribe.json -g" + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/super_subscribe.json -g" %binPath tdLog.info("%s" % cmd) assert os.system("%s" % cmd) == 0 diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/taosadapter_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/taosadapter_json.py index 4f60979e2a31148d6a193819bebfd531421b7b5f..0fb6da0124778c2c226ed968ffbc5c283237cdf3 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/taosadapter_json.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/taosadapter_json.py @@ -28,8 +28,31 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) + def getPath(self, tool="taosBenchmark"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if 
("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + def run(self): - cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/sml_rest_telnet.json" + binPath = self.getPath() + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/sml_rest_telnet.json" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") @@ -42,7 +65,7 @@ class TDTestCase: tdSql.query("select count(*) from db.stb2") tdSql.checkData(0, 0, 160) - cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/sml_rest_line.json" + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/sml_rest_line.json" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") @@ -55,7 +78,7 @@ class TDTestCase: tdSql.query("select count(*) from db2.stb2") tdSql.checkData(0, 0, 160) - cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/sml_rest_json.json" + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/sml_rest_json.json" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) tdSql.execute("reset query cache") diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/telnet_tcp.py b/tests/develop-test/5-taos-tools/taosbenchmark/telnet_tcp.py index 7603bcf40902d9b057774f812553b20961de093d..780c0530f22194ad43eb7aa64579ebf0aed123cf 100644 --- a/tests/develop-test/5-taos-tools/taosbenchmark/telnet_tcp.py +++ b/tests/develop-test/5-taos-tools/taosbenchmark/telnet_tcp.py @@ -29,8 +29,31 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) + def getPath(self, tool="taosBenchmark"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + def run(self): - cmd = "taosBenchmark -f ./5-taos-tools/taosbenchmark/json/sml_telnet_tcp.json" + binPath = self.getPath() + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/sml_telnet_tcp.json" %binPath tdLog.info("%s" % cmd) os.system("%s" % cmd) time.sleep(5) diff --git a/tests/develop-test/fulltest-others.sh b/tests/develop-test/fulltest-others.sh index b9e056a67b455bbb4b2c6518f7b9b8665618713d..d39b4c1facb34791e341044917a98da302f2c048 100755 --- a/tests/develop-test/fulltest-others.sh +++ b/tests/develop-test/fulltest-others.sh @@ -1,2 +1,3 @@ python3 ./test.py -f 0-others/json_tag.py -python3 ./test.py -f 0-others/TD-12435.py \ No newline at end of file +python3 ./test.py -f 0-others/TD-12435.py +python3 ./test.py -f 0-others/taos_shell.py \ No newline at end of file diff --git a/tests/docs-examples-test/test_python.sh b/tests/docs-examples-test/test_python.sh index 
22297ad92fc4c2efd821aaa197936ec08a89ef31..2b96311b29736951e71851af49f84f074428be72 100755 --- a/tests/docs-examples-test/test_python.sh +++ b/tests/docs-examples-test/test_python.sh @@ -9,7 +9,7 @@ cd ../../docs-examples/python # 1 taos -s "create database if not exists log" -python3 connect_exmaple.py +python3 connect_example.py # 2 taos -s "drop database if exists power" diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 738662427c40d680299f15001bf960d38998117a..fda76ba216e3e68321d02db16b49ffb422443d81 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -714,6 +714,7 @@ 5,,develop-test,python3 ./test.py -f 2-query/session_two_stage.py 5,,develop-test,python3 ./test.py -f 2-query/function_histogram.py 5,,develop-test,python3 ./test.py -f 0-others/TD-12435.py +5,,develop-test,python3 ./test.py -f 0-others/taos_shell.py 5,,develop-test,python3 ./test.py -f 0-others/json_tag.py 5,,develop-test,python3 ./test.py -f 2-query/function_mode.py 5,,develop-test,python3 ./test.py -f 2-query/function_now.py diff --git a/tests/pytest/account/account_create.py b/tests/pytest/account/account_create.py index 44745939d3e4ced77786512bcc93f77d85ad7545..c008acccd8d8d33af0ca77bd161d4ab020c1543e 100644 --- a/tests/pytest/account/account_create.py +++ b/tests/pytest/account/account_create.py @@ -33,9 +33,7 @@ class TDTestCase: tdSql.error("create user tdenginetdenginetdengine PASS 'test' ") - tdSql.execute("create user tdenginet PASS '1234512345123456' ") - - tdSql.error("create user tenginet PASS 'abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890' ") + tdSql.error("create user tdenginet PASS '1234512345123456' ") try: tdSql.execute("create account a&cc PASS 'pass123'") diff --git a/tests/pytest/query/queryFillTest.py b/tests/pytest/query/queryFillTest.py index e50d02faf27f33191f4a76c535443f6802e0ed28..1335e201d6bc64619df08fde4d0388077dafb7b9 100644 --- a/tests/pytest/query/queryFillTest.py +++ b/tests/pytest/query/queryFillTest.py @@ -85,6 +85,23 @@ class TDTestCase: tdSql.query("select spread(col1) from stb where ts > '2018-09-17 08:00:00.000' and ts < '2018-09-23 04:36:40.000' and id = 1 group by loc, id") tdSql.checkRows(rows) + + # case for TD-14782 + tdSql.execute("drop database if exists dd ") + tdSql.execute("create database dd keep 36500") + tdSql.execute("use dd") + tdSql.execute("create stable stable_1(ts timestamp , q_double double ) tags(loc nchar(100))") + tdSql.execute("create table stable_1_1 using stable_1 tags('stable_1_1')") + tdSql.execute("create table stable_1_2 using stable_1 tags('stable_1_2')") + tdSql.execute("insert into stable_1_1 (ts , q_double) values(1630000000000, 1)(1630000010000, 2)(1630000020000, 3)") + + tdSql.query("select STDDEV(q_double) from stable_1 where ts between 1630000001000 and 1630100001000 interval(18d) sliding(4d) Fill(NEXT) order by ts desc") + tdSql.checkRows(4) + tdSql.checkData(0, 1, 0.5) + tdSql.checkData(1, 1, 0.5) + tdSql.checkData(2, 1, 0.5) + tdSql.checkData(3, 1, 0.5) + def stop(self): tdSql.close() diff --git a/tests/script/general/user/pass_len.sim b/tests/script/general/user/pass_len.sim index d774c3fe291d473a58df211a90de5550bbaef89e..d6a011470a8eee4188e4aff1cec2786e3058e66d 100644 --- a/tests/script/general/user/pass_len.sim +++ b/tests/script/general/user/pass_len.sim @@ -59,7 +59,7 @@ endi print =============== step4 $i = 3 $user = $userPrefix . 
$i -sql create user $user PASS 'abcd123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890excess' -x step4 +sql create user $user PASS 'abcd012345678901234567891234567890' -x step4 return -1 step4: sql show users diff --git a/tests/script/unique/account/pass_len.sim b/tests/script/unique/account/pass_len.sim index efb0102aea6f04556e1112785c7bd69c2e31442c..d6731c7e48282e3320cd4a2dbf2c9d90fe46d875 100644 --- a/tests/script/unique/account/pass_len.sim +++ b/tests/script/unique/account/pass_len.sim @@ -58,7 +58,7 @@ endi print =============== step4 $i = 3 $user = $userPrefix . $i -sql create user $user PASS 'abcd0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890excess' -x step4 +sql create user $user PASS 'abcd012345678901234567891234567890' -x step4 return -1 step4: sql show users diff --git a/tests/system-test/5-taos-tools/taosdump/TD-12435.py b/tests/system-test/5-taos-tools/taosdump/TD-12435.py deleted file mode 100644 index 8f513ee8561bbe85687cf315cf091696d38d6b51..0000000000000000000000000000000000000000 --- a/tests/system-test/5-taos-tools/taosdump/TD-12435.py +++ /dev/null @@ -1,829 +0,0 @@ -################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. -# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- - -import sys -import taos -import time -import os -from util.log import tdLog -from util.cases import tdCases -from util.sql import tdSql - - -class TDTestCase: - def caseDescription(self): - ''' - case1:taosdump: char "`" can be used for both tag name and column name - ''' - return - - def init(self, conn, logSql): - tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) - - now = time.time() - self.ts = int(round(now * 1000)) - - def table1_checkall(self,sql): - tdLog.info(sql) - tdSql.query(sql) - tdSql.checkData(0,1,1) - tdSql.checkData(0,2,2) - tdSql.checkData(0,3,3) - tdSql.checkData(0,4,4) - tdSql.checkData(0,5,'True') - tdSql.checkData(0,6,6) - tdSql.checkData(0,7,7) - tdSql.checkData(0,8,8) - tdSql.checkData(0,9,9) - tdSql.checkData(0,10,'1970-01-01 08:00:00.010') - - def table1_checkall_1(self,sql): - tdSql.query(sql) - tdSql.checkData(0,1,1) - - def table1_checkall_2(self,sql): - self.table1_checkall_1(sql) - tdSql.checkData(0,2,2) - - def table1_checkall_3(self,sql): - self.table1_checkall_2(sql) - tdSql.checkData(0,3,3) - - def table1_checkall_4(self,sql): - self.table1_checkall_3(sql) - tdSql.checkData(0,4,4) - - def table1_checkall_5(self,sql): - self.table1_checkall_4(sql) - tdSql.checkData(0,5,'True') - - def table1_checkall_6(self,sql): - self.table1_checkall_5(sql) - tdSql.checkData(0,6,6) - - def table1_checkall_7(self,sql): - self.table1_checkall_6(sql) - tdSql.checkData(0,7,7) - - def table1_checkall_8(self,sql): - self.table1_checkall_7(sql) - tdSql.checkData(0,8,8) - - def table1_checkall_9(self,sql): - self.table1_checkall_8(sql) - tdSql.checkData(0,9,9) - - def table1_checkall_10(self,sql): - self.table1_checkall_9(sql) - tdSql.checkData(0,10,'1970-01-01 08:00:00.010') - - def run(self): - - testcaseFilename = 
os.path.split(__file__)[-1] - os.system("rm -rf 5-taos-tools/taosdump/%s.sql" % testcaseFilename ) - tdSql.prepare() - - print("==============step1") - print("prepare data") - - # case for defect: https://jira.taosdata.com:18080/browse/TD-2693 - tdSql.execute("create database db2") - tdSql.execute("use db2") - - print("==============new version [escape character] for stable==============") - print("==============step1,#create db.stable,db.table; insert db.table; show db.table; select db.table; drop db.table;") - print("prepare data") - - self.stb1 = "stable_1~!@#$%^&*()-_+=[]{}':,<.>/?stST13579" - self.tb1 = "table_1~!@#$%^&*()-_+=[]{}':,<.>/?stST13579" - - self.col_base = "123~!@#$%^&*()-_+=[]{}':,<.>/?stST13579" - - self.col_int = "stable_col_int%s" %self.col_base - print(self.col_int) - self.col_bigint = "stable_col_bigint%s" %self.col_base - self.col_smallint = "stable_col_smallint%s" %self.col_base - self.col_tinyint = "stable_col_tinyint%s" %self.col_base - self.col_bool = "stable_col_bool%s" %self.col_base - self.col_binary = "stable_col_binary%s" %self.col_base - self.col_nchar = "stable_col_nchar%s" %self.col_base - self.col_float = "stable_col_float%s" %self.col_base - self.col_double = "stable_col_double%s" %self.col_base - self.col_ts = "stable_col_ts%s" %self.col_base - - self.tag_base = "abc~!@#$%^&*()-_+=[]{}':,<.>/?stST13579" - self.tag_int = "stable_tag_int%s" %self.tag_base - self.tag_bigint = "stable_tag_bigint%s" %self.tag_base - self.tag_smallint = "stable_tag_smallint%s" %self.tag_base - self.tag_tinyint = "stable_tag_tinyint%s" %self.tag_base - self.tag_bool = "stable_tag_bool%s" %self.tag_base - self.tag_binary = "stable_tag_binary%s" %self.tag_base - self.tag_nchar = "stable_tag_nchar%s" %self.tag_base - self.tag_float = "stable_tag_float%s" %self.tag_base - self.tag_double = "stable_tag_double%s" %self.tag_base - self.tag_ts = "stable_tag_ts%s" %self.tag_base - - tdSql.execute('''create stable db.`%s` (ts timestamp, `%s` int , `%s` bigint , `%s` smallint , `%s` tinyint, `%s` bool , - `%s` binary(20) , `%s` nchar(20) ,`%s` float , `%s` double , `%s` timestamp) - tags(loc nchar(20), `%s` int , `%s` bigint , `%s` smallint , `%s` tinyint, `%s` bool , - `%s` binary(20) , `%s` nchar(20) ,`%s` float , `%s` double , `%s` timestamp);''' - %(self.stb1, self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, - self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts, - self.tag_int, self.tag_bigint, self.tag_smallint, self.tag_tinyint, self.tag_bool, - self.tag_binary, self.tag_nchar, self.tag_float, self.tag_double, self.tag_ts)) - tdSql.query("describe db.`%s` ; " %self.stb1) - tdSql.checkRows(22) - - tdSql.query("select _block_dist() from db.`%s` ; " %self.stb1) - tdSql.checkRows(0) - - tdSql.query("show create stable db.`%s` ; " %self.stb1) - tdSql.checkData(0, 0, self.stb1) - tdSql.checkData(0, 1, "CREATE TABLE `%s` (`ts` TIMESTAMP,`%s` INT,`%s` BIGINT,`%s` SMALLINT,`%s` TINYINT,`%s` BOOL,`%s` BINARY(20),`%s` NCHAR(20),`%s` FLOAT,`%s` DOUBLE,`%s` TIMESTAMP)\ - TAGS (`loc` NCHAR(20),`%s` INT,`%s` BIGINT,`%s` SMALLINT,`%s` TINYINT,`%s` BOOL,`%s` BINARY(20),`%s` NCHAR(20),`%s` FLOAT,`%s` DOUBLE,`%s` TIMESTAMP)" - %(self.stb1, self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, - self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts, - self.tag_int, self.tag_bigint, self.tag_smallint, self.tag_tinyint, self.tag_bool, - self.tag_binary, self.tag_nchar, self.tag_float, 
self.tag_double, self.tag_ts)) - - tdSql.execute("create table db.`table!1` using db.`%s` tags('table_1' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0')" %self.stb1) - tdSql.query("describe db.`table!1` ; ") - tdSql.checkRows(22) - - time.sleep(10) - tdSql.query("show create table db.`table!1` ; ") - tdSql.checkData(0, 0, "table!1") - tdSql.checkData(0, 1, "CREATE TABLE `table!1` USING `%s` TAGS (\"table_1\",0,0,0,0,false,\"0\",\"0\",0.000000,0.000000,\"0\")" %self.stb1) - - tdSql.execute("insert into db.`table!1` values(now, 1 , 2, 3, 4, 5, 6 ,7 ,8 ,9 ,10)") - sql = " select * from db.`table!1`; " - datacheck = self.table1_checkall(sql) - tdSql.checkRows(1) - sql = '''select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from db.`table!1`; '''\ - %(self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts) - datacheck = self.table1_checkall(sql) - tdSql.checkRows(1) - - time.sleep(1) - tdSql.execute('''insert into db.`table!1`(ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`) values(now, 1 , 2, 3, 4, 5, 6 ,7 ,8 ,9 ,10)'''\ - %(self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts) ) - sql = " select * from db.`table!1`; " - datacheck = self.table1_checkall(sql) - tdSql.checkRows(2) - - tdSql.query("select count(*) from db.`table!1`; ") - tdSql.checkData(0, 0, 2) - tdSql.query("select _block_dist() from db.`%s` ; " %self.stb1) - tdSql.checkRows(1) - - tdSql.execute("create table db.`%s` using db.`%s` TAGS (\"table_2\",2,2,2,2,true,\"2\",\"2\",2.000000,2.000000,\"2\")" %(self.tb1,self.stb1)) - tdSql.query("describe db.`%s` ; " %self.tb1) - tdSql.checkRows(22) - tdSql.query("show create table db.`%s` ; " %self.tb1) - tdSql.checkData(0, 0, self.tb1) - tdSql.checkData(0, 1, "CREATE TABLE `%s` USING `%s` TAGS (\"table_2\",2,2,2,2,true,\"2\",\"2\",2.000000,2.000000,\"2\")" %(self.tb1,self.stb1)) - - tdSql.execute("insert into db.`%s` values(now, 1 , 2, 3, 4, 5, 6 ,7 ,8 ,9 ,10)" %self.tb1) - sql = "select * from db.`%s` ; " %self.tb1 - datacheck = self.table1_checkall(sql) - tdSql.checkRows(1) - sql = '''select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from db.`%s` ; '''\ - %(self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts,\ - self.tag_int, self.tag_bigint, self.tag_smallint, self.tag_tinyint, self.tag_bool, self.tag_binary, self.tag_nchar, self.tag_float, self.tag_double, self.tag_ts, self.tb1) - datacheck = self.table1_checkall(sql) - tdSql.checkRows(1) - - time.sleep(1) - tdSql.execute('''insert into db.`%s`(ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`) values(now, 1 , 2, 3, 4, 5, 6 ,7 ,8 ,9 ,10)'''\ - %(self.tb1, self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts) ) - sql = " select * from db.`%s` ; " %self.tb1 - datacheck = self.table1_checkall(sql) - tdSql.checkRows(2) - - sql = " select * from db.`%s` where `%s`=1 and `%s`=2 and `%s`=3 and `%s`=4 and `%s`='True' and `%s`=6 and `%s`=7 and `%s`=8 and `%s`=9 and `%s`=10; " \ - %(self.tb1, self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, 
-            self.col_double, self.col_ts)
-        datacheck = self.table1_checkall(sql)
-        tdSql.checkRows(2)
-
-        tdSql.query("select count(*) from db.`%s`; " %self.tb1)
-        tdSql.checkData(0, 0, 2)
-        sql = "select * from db.`%s` ; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.checkRows(4)
-        tdSql.query("select count(*) from db.`%s`; " %self.stb1)
-        tdSql.checkData(0, 0, 4)
-
-        sql = "select * from (select * from db.`%s`) ; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.checkRows(4)
-        tdSql.query("select count(*) from (select * from db.`%s`) ; " %self.stb1)
-        tdSql.checkData(0, 0, 4)
-
-        sql = "select * from (select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from db.`%s`) ; " \
-            %(self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts, self.stb1)
-        datacheck = self.table1_checkall(sql)
-        tdSql.checkRows(4)
-
-        sql = "select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from (select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from db.`%s`) ; " \
-            %(self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts,\
-            self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts, self.stb1)
-        datacheck = self.table1_checkall(sql)
-        tdSql.checkRows(4)
-
-        sql = "select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from (select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from db.`%s`\
-            where `%s`=1 and `%s`=2 and `%s`=3 and `%s`=4 and `%s`='True' and `%s`=6 and `%s`=7 and `%s`=8 and `%s`=9 and `%s`=10 ) ; " \
-            %(self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts,\
-            self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts, self.stb1, \
-            self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts)
-        datacheck = self.table1_checkall(sql)
-        tdSql.checkRows(4)
-
-        tdSql.query("show db.stables like 'stable_1%' ")
-        tdSql.checkRows(1)
-        tdSql.query("show db.tables like 'table%' ")
-        tdSql.checkRows(2)
-
-        self.cr_tb1 = "create_table_1~!@#$%^&*()-_+=[]{}':,<.>/?stST13579"
-        tdSql.execute("create table db.`%s` as select avg(`%s`) from db.`%s` where ts > now interval(1m) sliding(30s);" %(self.cr_tb1,self.col_bigint,self.stb1))
-        tdSql.query("show db.tables like 'create_table_%' ")
-        tdSql.checkRows(1)
-
-        print("==============drop\ add\ change\ modify column or tag")
-        print("==============drop==============")
-        tdSql.execute("ALTER TABLE db.`%s` DROP TAG `%s`; " %(self.stb1, self.tag_ts))
-        sql = " select * from db.`%s`; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(21)
-        tdSql.execute("ALTER TABLE db.`%s` DROP TAG `%s`; " %(self.stb1, self.tag_double))
-        sql = " select * from db.`%s`; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(20)
-        tdSql.execute("ALTER TABLE db.`%s` DROP TAG `%s`; " %(self.stb1, self.tag_float))
-        sql = " select * from db.`%s`; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(19)
-        tdSql.execute("ALTER TABLE db.`%s` DROP TAG `%s`; " %(self.stb1, self.tag_nchar))
-        sql = " select * from db.`%s`; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(18)
-        tdSql.execute("ALTER TABLE db.`%s` DROP TAG `%s`; " %(self.stb1, self.tag_binary))
-        sql = " select * from db.`%s`; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(17)
-        tdSql.execute("ALTER TABLE db.`%s` DROP TAG `%s`; " %(self.stb1, self.tag_bool))
-        sql = " select * from db.`%s`; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(16)
-        tdSql.execute("ALTER TABLE db.`%s` DROP TAG `%s`; " %(self.stb1, self.tag_tinyint))
-        sql = " select * from db.`%s`; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(15)
-        tdSql.execute("ALTER TABLE db.`%s` DROP TAG `%s`; " %(self.stb1, self.tag_smallint))
-        sql = " select * from db.`%s`; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(14)
-        tdSql.execute("ALTER TABLE db.`%s` DROP TAG `%s`; " %(self.stb1, self.tag_bigint))
-        sql = " select * from db.`%s`; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(13)
-        tdSql.execute("ALTER TABLE db.`%s` DROP TAG `%s`; " %(self.stb1, self.tag_int))
-        sql = " select * from db.`%s`; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(12)
-
-        tdSql.execute("ALTER TABLE db.`%s` DROP COLUMN `%s`; " %(self.stb1, self.col_ts))
-        sql = " select * from db.`%s`; " %self.stb1
-        datacheck = self.table1_checkall_9(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(11)
-        tdSql.execute("ALTER TABLE db.`%s` DROP COLUMN `%s`; " %(self.stb1, self.col_double))
-        sql = " select * from db.`%s`; " %self.stb1
-        datacheck = self.table1_checkall_8(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(10)
-        tdSql.execute("ALTER TABLE db.`%s` DROP COLUMN `%s`; " %(self.stb1, self.col_float))
-        sql = " select * from db.`%s`; " %self.stb1
-        datacheck = self.table1_checkall_7(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(9)
-        tdSql.execute("ALTER TABLE db.`%s` DROP COLUMN `%s`; " %(self.stb1, self.col_nchar))
-        sql = " select * from db.`%s`; " %self.stb1
-        datacheck = self.table1_checkall_6(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(8)
-        tdSql.execute("ALTER TABLE db.`%s` DROP COLUMN `%s`; " %(self.stb1, self.col_binary))
-        sql = " select * from db.`%s`; " %self.stb1
-        datacheck = self.table1_checkall_5(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(7)
-        tdSql.execute("ALTER TABLE db.`%s` DROP COLUMN `%s`; " %(self.stb1, self.col_bool))
-        sql = " select * from db.`%s`; " %self.stb1
-        datacheck = self.table1_checkall_4(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(6)
-        tdSql.execute("ALTER TABLE db.`%s` DROP COLUMN `%s`; " %(self.stb1, self.col_tinyint))
-        sql = " select * from db.`%s`; " %self.stb1
-        datacheck = self.table1_checkall_3(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(5)
-        tdSql.execute("ALTER TABLE db.`%s` DROP COLUMN `%s`; " %(self.stb1, self.col_smallint))
-        sql = " select * from db.`%s`; " %self.stb1
-        datacheck = self.table1_checkall_2(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(4)
-        tdSql.execute("ALTER TABLE db.`%s` DROP COLUMN `%s`; " %(self.stb1, self.col_bigint))
-        sql = " select * from db.`%s`; " %self.stb1
-        datacheck = self.table1_checkall_1(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(3)
-        tdSql.error("ALTER TABLE db.`%s` DROP COLUMN `%s`; " %(self.stb1, self.col_int))
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(3)
-
-        print("==============add==============")
-        tdSql.execute("ALTER TABLE db.`%s` ADD COLUMN `%s` bigint; " %(self.stb1, self.col_bigint))
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(4)
-        tdSql.execute("ALTER TABLE db.`%s` ADD COLUMN `%s` smallint; " %(self.stb1, self.col_smallint))
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(5)
-        tdSql.execute("ALTER TABLE db.`%s` ADD COLUMN `%s` tinyint; " %(self.stb1, self.col_tinyint))
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(6)
-        tdSql.execute("ALTER TABLE db.`%s` ADD COLUMN `%s` bool; " %(self.stb1, self.col_bool))
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(7)
-        tdSql.execute("ALTER TABLE db.`%s` ADD COLUMN `%s` binary(20); " %(self.stb1, self.col_binary))
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(8)
-
-        tdSql.execute("insert into db.`%s` values(now, 1 , 2, 3, 4, 5, 6)" %self.tb1)
-        sql = "select * from db.`%s` order by ts desc; " %self.tb1
-        datacheck = self.table1_checkall_5(sql)
-
-        tdSql.execute("ALTER TABLE db.`%s` ADD COLUMN `%s` nchar(20); " %(self.stb1, self.col_nchar))
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(9)
-        tdSql.execute("ALTER TABLE db.`%s` ADD COLUMN `%s` float; " %(self.stb1, self.col_float))
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(10)
-        tdSql.execute("ALTER TABLE db.`%s` ADD COLUMN `%s` double; " %(self.stb1, self.col_double))
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(11)
-        tdSql.execute("ALTER TABLE db.`%s` ADD COLUMN `%s` timestamp; " %(self.stb1, self.col_ts))
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(12)
-
-        tdSql.execute("insert into db.`%s` values(now, 1 , 2, 3, 4, 5, 6 ,7 ,8 ,9 ,10)" %self.tb1)
-        sql = "select * from db.`%s` order by ts desc; " %self.tb1
-        datacheck = self.table1_checkall(sql)
-
-        tdSql.execute("ALTER TABLE db.`%s` ADD TAG `%s` int; " %(self.stb1, self.tag_int))
-        sql = " select * from db.`%s` order by ts desc; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(13)
-        tdSql.execute("ALTER TABLE db.`%s` ADD TAG `%s` bigint; " %(self.stb1, self.tag_bigint))
-        sql = " select * from db.`%s` order by ts desc; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(14)
-        tdSql.execute("ALTER TABLE db.`%s` ADD TAG `%s` smallint; " %(self.stb1, self.tag_smallint))
-        sql = " select * from db.`%s` order by ts desc; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(15)
-        tdSql.execute("ALTER TABLE db.`%s` ADD TAG `%s` tinyint; " %(self.stb1, self.tag_tinyint))
-        sql = " select * from db.`%s` order by ts desc; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(16)
-        tdSql.execute("ALTER TABLE db.`%s` ADD TAG `%s` bool; " %(self.stb1, self.tag_bool))
-        sql = " select * from db.`%s` order by ts desc; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(17)
-        tdSql.execute("ALTER TABLE db.`%s` ADD TAG `%s` binary(20); " %(self.stb1, self.tag_binary))
-        sql = " select * from db.`%s` order by ts desc; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(18)
-        tdSql.execute("ALTER TABLE db.`%s` ADD TAG `%s` nchar(20); " %(self.stb1, self.tag_nchar))
-        sql = " select * from db.`%s` order by ts desc; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(19)
-        tdSql.execute("ALTER TABLE db.`%s` ADD TAG `%s` float; " %(self.stb1, self.tag_float))
-        sql = " select * from db.`%s` order by ts desc; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(20)
-        tdSql.execute("ALTER TABLE db.`%s` ADD TAG `%s` double; " %(self.stb1, self.tag_double))
-        sql = " select * from db.`%s` order by ts desc; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(21)
-        tdSql.execute("ALTER TABLE db.`%s` ADD TAG `%s` timestamp; " %(self.stb1, self.tag_ts))
-        sql = " select * from db.`%s` order by ts desc; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(22)
-
-        print("==============change==============")
-        self.tag_base_change = "abcdas"
-        self.tag_int_change = "stable_tag_int%s" %self.tag_base_change
-        self.tag_bigint_change = "stable_tag_bigint%s" %self.tag_base_change
-        self.tag_smallint_change = "stable_tag_smallint%s" %self.tag_base_change
-        self.tag_tinyint_change = "stable_tag_tinyint%s" %self.tag_base_change
-        self.tag_bool_change = "stable_tag_bool%s" %self.tag_base_change
-        self.tag_binary_change = "stable_tag_binary%s" %self.tag_base_change
-        self.tag_nchar_change = "stable_tag_nchar%s" %self.tag_base_change
-        self.tag_float_change = "stable_tag_float%s" %self.tag_base_change
-        self.tag_double_change = "stable_tag_double%s" %self.tag_base_change
-        self.tag_ts_change = "stable_tag_ts%s" %self.tag_base_change
-
-        tdSql.execute("ALTER TABLE db.`%s` CHANGE TAG `%s` `%s`; " %(self.stb1, self.tag_int, self.tag_int_change))
-        sql = " select * from db.`%s` order by ts desc; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(22)
-        tdSql.execute("ALTER TABLE db.`%s` CHANGE TAG `%s` `%s`; " %(self.stb1, self.tag_bigint, self.tag_bigint_change))
-        sql = " select * from db.`%s` order by ts desc; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(22)
-        tdSql.execute("ALTER TABLE db.`%s` CHANGE TAG `%s` `%s`; " %(self.stb1, self.tag_smallint, self.tag_smallint_change))
-        sql = " select * from db.`%s` order by ts desc; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(22)
-        tdSql.execute("ALTER TABLE db.`%s` CHANGE TAG `%s` `%s`; " %(self.stb1, self.tag_tinyint, self.tag_tinyint_change))
-        sql = " select * from db.`%s` order by ts desc; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(22)
-        tdSql.execute("ALTER TABLE db.`%s` CHANGE TAG `%s` `%s`; " %(self.stb1, self.tag_bool, self.tag_bool_change))
-        sql = " select * from db.`%s` order by ts desc; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(22)
-        tdSql.execute("ALTER TABLE db.`%s` CHANGE TAG `%s` `%s`; " %(self.stb1, self.tag_binary, self.tag_binary_change))
-        sql = " select * from db.`%s` order by ts desc; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(22)
-        tdSql.execute("ALTER TABLE db.`%s` CHANGE TAG `%s` `%s`; " %(self.stb1, self.tag_nchar, self.tag_nchar_change))
-        sql = " select * from db.`%s` order by ts desc; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(22)
-        tdSql.execute("ALTER TABLE db.`%s` CHANGE TAG `%s` `%s`; " %(self.stb1, self.tag_float, self.tag_float_change))
-        sql = " select * from db.`%s` order by ts desc; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(22)
-        tdSql.execute("ALTER TABLE db.`%s` CHANGE TAG `%s` `%s`; " %(self.stb1, self.tag_double, self.tag_double_change))
-        sql = " select * from db.`%s` order by ts desc; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(22)
-        tdSql.execute("ALTER TABLE db.`%s` CHANGE TAG `%s` `%s`; " %(self.stb1, self.tag_ts, self.tag_ts_change))
-        sql = " select * from db.`%s` order by ts desc; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(22)
-
-        print("==============modify==============")
-        # TD-10810
-        tdSql.execute("ALTER STABLE db.`%s` MODIFY TAG `%s` binary(30); ; " %(self.stb1, self.tag_binary_change))
-        sql = " select * from db.`%s` order by ts desc; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(22)
-        tdSql.execute("ALTER STABLE db.`%s` MODIFY TAG `%s` nchar(30); ; " %(self.stb1, self.tag_nchar_change))
-        sql = " select * from db.`%s` order by ts desc; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(22)
-
-        tdSql.execute("ALTER STABLE db.`%s` MODIFY COLUMN `%s` binary(30); ; " %(self.stb1, self.col_binary))
-        sql = " select * from db.`%s` order by ts desc; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(22)
-        tdSql.execute("ALTER STABLE db.`%s` MODIFY COLUMN `%s` nchar(30); ; " %(self.stb1, self.col_nchar))
-        sql = " select * from db.`%s` order by ts desc; " %self.stb1
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db.`%s` ; " %self.tb1)
-        tdSql.checkRows(22)
-
-        print("==============drop table\stable")
-        try:
-            tdSql.execute("drop table db.`%s` " %self.tb1)
-        except Exception as e:
-            tdLog.exit(e)
-
-        tdSql.error("select * from db.`%s`" %self.tb1)
-        tdSql.query("show db.stables like 'stable_1%' ")
-        tdSql.checkRows(1)
-
-        try:
-            tdSql.execute("drop table db.`%s` " %self.stb1)
-        except Exception as e:
-            tdLog.exit(e)
-
-        tdSql.error("select * from db.`%s`" %self.tb1)
-        tdSql.error("select * from db.`%s`" %self.stb1)
-
-
-        print("==============step2,#create stable,table; insert table; show table; select table; drop table")
-
-        self.stb2 = "stable_2~!@#$%^&*()-_+=[]{}';:,<.>/?stST24680~!@#$%^&*()-_+=[]{}"
-        self.tb2 = "table_2~!@#$%^&*()-_+=[]{}';:,<.>/?stST24680~!@#$%^&*()-_+=[]{}"
-
-        tdSql.execute("create stable `%s` (ts timestamp, i int) tags(j int);" %self.stb2)
-        tdSql.query("describe `%s` ; "%self.stb2)
-        tdSql.checkRows(3)
-
-        tdSql.query("select _block_dist() from `%s` ; " %self.stb2)
-        tdSql.checkRows(0)
-
-        tdSql.query("show create stable `%s` ; " %self.stb2)
-        tdSql.checkData(0, 0, self.stb2)
-        tdSql.checkData(0, 1, "CREATE TABLE `%s` (`ts` TIMESTAMP,`i` INT) TAGS (`j` INT)" %self.stb2)
-
-        tdSql.execute("create table `table!2` using `%s` tags(1)" %self.stb2)
-        tdSql.query("describe `table!2` ; ")
-        tdSql.checkRows(3)
-
-        time.sleep(10)
-
-        tdSql.query("show create table `table!2` ; ")
-        tdSql.checkData(0, 0, "table!2")
-        tdSql.checkData(0, 1, "CREATE TABLE `table!2` USING `%s` TAGS (1)" %self.stb2)
-        tdSql.execute("insert into `table!2` values(now, 1)")
-        tdSql.query("select * from `table!2`; ")
-        tdSql.checkRows(1)
-        tdSql.query("select count(*) from `table!2`; ")
-        tdSql.checkData(0, 0, 1)
-        tdSql.query("select _block_dist() from `%s` ; " %self.stb2)
-        tdSql.checkRows(1)
-
-        tdSql.execute("create table `%s` using `%s` tags(1)" %(self.tb2,self.stb2))
-        tdSql.query("describe `%s` ; " %self.tb2)
-        tdSql.checkRows(3)
-        tdSql.query("show create table `%s` ; " %self.tb2)
-        tdSql.checkData(0, 0, self.tb2)
-        tdSql.checkData(0, 1, "CREATE TABLE `%s` USING `%s` TAGS (1)" %(self.tb2,self.stb2))
-        tdSql.execute("insert into `%s` values(now, 1)" %self.tb2)
-        tdSql.query("select * from `%s` ; " %self.tb2)
-        tdSql.checkRows(1)
-        tdSql.query("select count(*) from `%s`; " %self.tb2)
-        tdSql.checkData(0, 0, 1)
-        tdSql.query("select * from `%s` ; " %self.stb2)
-        tdSql.checkRows(2)
-        tdSql.query("select count(*) from `%s`; " %self.stb2)
-        tdSql.checkData(0, 0, 2)
-
-        tdSql.query("select * from (select * from `%s`) ; " %self.stb2)
-        tdSql.checkRows(2)
-        tdSql.query("select count(*) from (select * from `%s` ); " %self.stb2)
-        tdSql.checkData(0, 0, 2)
-
-        tdSql.query("show stables like 'stable_2%' ")
-        tdSql.checkRows(1)
-        tdSql.query("show tables like 'table%' ")
-        tdSql.checkRows(2)
-
-
-        #TD-10536
-        self.cr_tb2 = "create_table_2~!@#$%^&*()-_+=[]{}';:,<.>/?stST24680~!@#$%^&*()-_+=[]{}"
-        tdSql.execute("create table `%s` as select * from `%s` ;" %(self.cr_tb2,self.stb2))
-        tdSql.query("show db.tables like 'create_table_%' ")
-        tdSql.checkRows(1)
-
-
-        print("==============step3,#create regular_table; insert regular_table; show regular_table; select regular_table; drop regular_table")
-        self.regular_table = "regular_table~!@#$%^&*()-_+=[]{}';:,<.>/?stST24680~!@#$%^&*()-_+=[]{}"
-
-        self.regular_col_base = "123@#$%^&*()-_+=[]{};:,<.>/?~!$%^"
-
-        self.col_int = "regular_table_col_int%s" %self.regular_col_base
-        print(self.col_int)
-        self.col_bigint = "regular_table_col_bigint%s" %self.regular_col_base
-        self.col_smallint = "regular_table_col_smallint%s" %self.regular_col_base
-        self.col_tinyint = "regular_table_col_tinyint%s" %self.regular_col_base
-        self.col_bool = "regular_table_col_bool%s" %self.regular_col_base
-        self.col_binary = "regular_table_col_binary%s" %self.regular_col_base
-        self.col_nchar = "regular_table_col_nchar%s" %self.regular_col_base
-        self.col_float = "regular_table_col_float%s" %self.regular_col_base
-        self.col_double = "regular_table_col_double%s" %self.regular_col_base
-        self.col_ts = "regular_table_col_ts%s" %self.regular_col_base
-
-        tdSql.execute("create table `%s` (ts timestamp,`%s` int , `%s` bigint , `%s` smallint , `%s` tinyint, `%s` bool , \
-            `%s` binary(20) , `%s` nchar(20) ,`%s` float , `%s` double , `%s` timestamp) ;"\
-            %(self.regular_table, self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool,
-            self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts))
-        tdSql.query("describe `%s` ; "%self.regular_table)
-        tdSql.checkRows(11)
-
-        tdSql.query("select _block_dist() from `%s` ; " %self.regular_table)
-        tdSql.checkRows(1)
-
-        tdSql.query("show create table `%s` ; " %self.regular_table)
-        tdSql.checkData(0, 0, self.regular_table)
-        tdSql.checkData(0, 1, "CREATE TABLE `%s` (`ts` TIMESTAMP,`%s` INT,`%s` BIGINT,`%s` SMALLINT,`%s` TINYINT,`%s` BOOL,`%s` BINARY(20),`%s` NCHAR(20),`%s` FLOAT,`%s` DOUBLE,`%s` TIMESTAMP)"
-            %(self.regular_table, self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool,
-            self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts))
-
-        tdSql.execute("insert into `%s` values(now, 1 , 2, 3, 4, 5, 6 ,7 ,8 ,9 ,10)" %self.regular_table)
-        sql = "select * from `%s` ; " %self.regular_table
-        datacheck = self.table1_checkall(sql)
-        tdSql.checkRows(1)
-        sql = '''select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from db2.`%s`; '''\
-            %(self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts, self.regular_table)
-        datacheck = self.table1_checkall(sql)
-        tdSql.checkRows(1)
-
-        time.sleep(1)
-        tdSql.execute('''insert into db2.`%s` (ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`) values(now, 1 , 2, 3, 4, 5, 6 ,7 ,8 ,9 ,10)'''\
-            %(self.regular_table, self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts) )
-        sql = " select * from db2.`%s`; " %self.regular_table
-        datacheck = self.table1_checkall(sql)
-        tdSql.checkRows(2)
-
-        sql = " select * from db2.`%s` where `%s`=1 and `%s`=2 and `%s`=3 and `%s`=4 and `%s`='True' and `%s`=6 and `%s`=7 and `%s`=8 and `%s`=9 and `%s`=10; " \
-            %(self.regular_table, self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts)
-        datacheck = self.table1_checkall(sql)
-        tdSql.checkRows(2)
-
-        tdSql.query("select count(*) from `%s`; " %self.regular_table)
-        tdSql.checkData(0, 0, 2)
-        tdSql.query("select _block_dist() from `%s` ; " %self.regular_table)
-        tdSql.checkRows(1)
-
-        sql = "select * from (select * from `%s`) ; " %self.regular_table
-        datacheck = self.table1_checkall(sql)
-        tdSql.checkRows(2)
-
-        sql = "select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from (select ts ,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s`,`%s` from db2.`%s`\
-            where `%s`=1 and `%s`=2 and `%s`=3 and `%s`=4 and `%s`='True' and `%s`=6 and `%s`=7 and `%s`=8 and `%s`=9 and `%s`=10 ) ; " \
-            %(self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts,\
-            self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts, self.regular_table, \
-            self.col_int, self.col_bigint, self.col_smallint, self.col_tinyint, self.col_bool, self.col_binary, self.col_nchar, self.col_float, self.col_double, self.col_ts)
-        datacheck = self.table1_checkall(sql)
-        tdSql.checkRows(2)
-
-        tdSql.query("select count(*) from (select * from `%s` ); " %self.regular_table)
-        tdSql.checkData(0, 0, 2)
-
-        tdSql.query("show tables like 'regular_table%' ")
-        tdSql.checkRows(1)
-
-        self.crr_tb = "create_r_table~!@#$%^&*()-_+=[]{}';:,<.>/?stST24680~!@#$%^&*()-_+=[]{}"
-        tdSql.execute("create table `%s` as select * from `%s` ;" %(self.crr_tb,self.regular_table))
-        tdSql.query("show db2.tables like 'create_r_table%' ")
-        tdSql.checkRows(1)
-
-
-        print("==============drop\ add\ change\ modify column ")
-        print("==============drop==============")
-        tdSql.execute("ALTER TABLE db2.`%s` DROP COLUMN `%s`; " %(self.regular_table, self.col_ts))
-        sql = " select * from db2.`%s`; " %self.regular_table
-        datacheck = self.table1_checkall_9(sql)
-        tdSql.query("describe db2.`%s` ; " %self.regular_table)
-        tdSql.checkRows(10)
-        tdSql.execute("ALTER TABLE `%s` DROP COLUMN `%s`; " %(self.regular_table, self.col_double))
-        sql = " select * from `%s`; " %self.regular_table
-        datacheck = self.table1_checkall_8(sql)
-        tdSql.query("describe `%s` ; " %self.regular_table)
-        tdSql.checkRows(9)
-        tdSql.execute("ALTER TABLE db2.`%s` DROP COLUMN `%s`; " %(self.regular_table, self.col_float))
-        sql = " select * from db2.`%s`; " %self.regular_table
-        datacheck = self.table1_checkall_7(sql)
-        tdSql.query("describe db2.`%s` ; " %self.regular_table)
-        tdSql.checkRows(8)
-        tdSql.execute("ALTER TABLE `%s` DROP COLUMN `%s`; " %(self.regular_table, self.col_nchar))
-        sql = " select * from `%s`; " %self.regular_table
-        datacheck = self.table1_checkall_6(sql)
-        tdSql.query("describe `%s` ; " %self.regular_table)
-        tdSql.checkRows(7)
-        tdSql.execute("ALTER TABLE db2.`%s` DROP COLUMN `%s`; " %(self.regular_table, self.col_binary))
-        sql = " select * from db2.`%s`; " %self.regular_table
-        datacheck = self.table1_checkall_5(sql)
-        tdSql.query("describe db2.`%s` ; " %self.regular_table)
-        tdSql.checkRows(6)
-        tdSql.execute("ALTER TABLE `%s` DROP COLUMN `%s`; " %(self.regular_table, self.col_bool))
-        sql = " select * from `%s`; " %self.regular_table
-        datacheck = self.table1_checkall_4(sql)
-        tdSql.query("describe `%s` ; " %self.regular_table)
-        tdSql.checkRows(5)
-        tdSql.execute("ALTER TABLE db2.`%s` DROP COLUMN `%s`; " %(self.regular_table, self.col_tinyint))
-        sql = " select * from db2.`%s`; " %self.regular_table
-        datacheck = self.table1_checkall_3(sql)
-        tdSql.query("describe db2.`%s` ; " %self.regular_table)
-        tdSql.checkRows(4)
-        tdSql.execute("ALTER TABLE `%s` DROP COLUMN `%s`; " %(self.regular_table, self.col_smallint))
-        sql = " select * from `%s`; " %self.regular_table
-        datacheck = self.table1_checkall_2(sql)
-        tdSql.query("describe `%s` ; " %self.regular_table)
-        tdSql.checkRows(3)
-        tdSql.execute("ALTER TABLE db2.`%s` DROP COLUMN `%s`; " %(self.regular_table, self.col_bigint))
-        sql = " select * from db2.`%s`; " %self.regular_table
-        datacheck = self.table1_checkall_1(sql)
-        tdSql.query("describe db2.`%s` ; " %self.regular_table)
-        tdSql.checkRows(2)
-        tdSql.error("ALTER TABLE db2.`%s` DROP COLUMN `%s`; " %(self.regular_table, self.col_int))
-        tdSql.query("describe `%s` ; " %self.regular_table)
-        tdSql.checkRows(2)
-
-        print("==============add==============")
-        tdSql.execute("ALTER TABLE db2.`%s` ADD COLUMN `%s` bigint; " %(self.regular_table, self.col_bigint))
-        tdSql.query("describe db2.`%s` ; " %self.regular_table)
-        tdSql.checkRows(3)
-        tdSql.execute("ALTER TABLE db2.`%s` ADD COLUMN `%s` smallint; " %(self.regular_table, self.col_smallint))
-        tdSql.query("describe db2.`%s` ; " %self.regular_table)
-        tdSql.checkRows(4)
-        tdSql.execute("ALTER TABLE db2.`%s` ADD COLUMN `%s` tinyint; " %(self.regular_table, self.col_tinyint))
-        tdSql.query("describe db2.`%s` ; " %self.regular_table)
-        tdSql.checkRows(5)
-        tdSql.execute("ALTER TABLE db2.`%s` ADD COLUMN `%s` bool; " %(self.regular_table, self.col_bool))
-        tdSql.query("describe db2.`%s` ; " %self.regular_table)
-        tdSql.checkRows(6)
-        tdSql.execute("ALTER TABLE db2.`%s` ADD COLUMN `%s` binary(20); " %(self.regular_table, self.col_binary))
-        tdSql.query("describe db2.`%s` ; " %self.regular_table)
-        tdSql.checkRows(7)
-
-        tdSql.execute("insert into db2.`%s` values(now, 1 , 2, 3, 4, 5, 6)" %self.regular_table)
-        sql = "select * from db2.`%s` order by ts desc; " %self.regular_table
-        datacheck = self.table1_checkall_5(sql)
-
-        tdSql.execute("ALTER TABLE db2.`%s` ADD COLUMN `%s` nchar(20); " %(self.regular_table, self.col_nchar))
-        tdSql.query("describe db2.`%s` ; " %self.regular_table)
-        tdSql.checkRows(8)
-        tdSql.execute("ALTER TABLE db2.`%s` ADD COLUMN `%s` float; " %(self.regular_table, self.col_float))
-        tdSql.query("describe db2.`%s` ; " %self.regular_table)
-        tdSql.checkRows(9)
-        tdSql.execute("ALTER TABLE db2.`%s` ADD COLUMN `%s` double; " %(self.regular_table, self.col_double))
-        tdSql.query("describe db2.`%s` ; " %self.regular_table)
-        tdSql.checkRows(10)
-        tdSql.execute("ALTER TABLE db2.`%s` ADD COLUMN `%s` timestamp; " %(self.regular_table, self.col_ts))
-        tdSql.query("describe db2.`%s` ; " %self.regular_table)
-        tdSql.checkRows(11)
-
-        tdSql.execute("insert into db2.`%s` values(now, 1 , 2, 3, 4, 5, 6 ,7 ,8 ,9 ,10)" %self.regular_table)
-        sql = "select * from db2.`%s` order by ts desc; " %self.regular_table
-        datacheck = self.table1_checkall(sql)
-
-
-        print("==============change, regular not support==============")
-
-
-        print("==============modify==============")
-        # TD-10810
-        tdSql.execute("ALTER TABLE db2.`%s` MODIFY COLUMN `%s` binary(30); ; " %(self.regular_table, self.col_binary))
-        sql = " select * from db2.`%s` order by ts desc; " %self.regular_table
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe db2.`%s` ; " %self.regular_table)
-        tdSql.checkRows(11)
-        tdSql.execute("ALTER TABLE `%s` MODIFY COLUMN `%s` nchar(30); ; " %(self.regular_table, self.col_nchar))
-        sql = " select * from `%s` order by ts desc; " %self.regular_table
-        datacheck = self.table1_checkall(sql)
-        tdSql.query("describe `%s` ; " %self.regular_table)
-        tdSql.checkRows(11)
-
-
-        assert os.system("taosdump -D db -y") == 0
-        assert os.system("taosdump -D db2 -y") == 0
-
-        assert os.system("taosdump -i . -g -y") == 0
-
-
-    def stop(self):
-        tdSql.close()
-        tdLog.success("%s successfully executed" % __file__)
-
-
-tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
-10.3 219 0.31 Beijing.Chaoyang 2
-10.2 220 0.23 Beijing.Chaoyang 3
-11.5 221 0.35 Beijing.Haidian 3
-13.4 223 0.29 Beijing.Haidian 2
-12.6 218 0.33 Beijing.Chaoyang 2
-11.8 221 0.28 Beijing.Haidian 2
-10.3 218 0.25 Beijing.Chaoyang 3
-12.3 221 0.31 Beijing.Chaoyang 2
+10.3 219 0.31 California.SanFrancisco 2
+10.2 220 0.23 California.SanFrancisco 3
+11.5 221 0.35 California.LosAngeles 3
+13.4 223 0.29 California.LosAngeles 2
+12.6 218 0.33 California.SanFrancisco 2
+11.8 221 0.28 California.LosAngeles 2
+10.3 218 0.25 California.SanFrancisco 3
+12.3 221 0.31 California.SanFrancisco 2