diff --git a/docs/en/02-intro/index.md b/docs/en/02-intro/index.md index 23cf1203b29c24ef92214e11f2c40b09796b4cd6..1637a3d4546b3e52369e152555d5faab46633ec9 100644 --- a/docs/en/02-intro/index.md +++ b/docs/en/02-intro/index.md @@ -3,7 +3,7 @@ title: Introduction toc_max_heading_level: 2 --- -TDengine is a high-performance, scalable time-series database with SQL support. Its code, including its cluster feature is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](/develop/cache), [stream processing](/develop/continuous-query), [data subscription](/develop/subscribe) and other functionalities to reduce the complexity and cost of development and operation. +TDengine is a high-performance, scalable time-series database with SQL support. Its code, including its cluster feature is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](/develop/cache), [stream processing](../develop/continuous-query), [data subscription](../develop/subscribe) and other functionalities to reduce the complexity and cost of development and operation. This section introduces the major features, competitive advantages, typical use-cases and benchmarks to help you get a high level overview of TDengine. @@ -16,9 +16,9 @@ The major features are listed below: 3. Support for [all kinds of queries](/develop/query-data), including aggregation, nested query, downsampling, interpolation and others. 4. Support for [user defined functions](/develop/udf). 5. Support for [caching](/develop/cache). TDengine always saves the last data point in cache, so Redis is not needed in some scenarios. -6. Support for [continuous query](/develop/continuous-query). -7. Support for [data subscription](/develop/subscribe) with the capability to specify filter conditions. -8. Support for [cluster](/cluster/), with the capability of increasing processing power by adding more nodes. High availability is supported by replication. +6. Support for [continuous query](../develop/continuous-query). +7. Support for [data subscription](../develop/subscribe) with the capability to specify filter conditions. +8. Support for [cluster](../cluster/), with the capability of increasing processing power by adding more nodes. High availability is supported by replication. 9. Provides an interactive [command-line interface](/reference/taos-shell) for management, maintenance and ad-hoc queries. 10. Provides many ways to [import](/operation/import) and [export](/operation/export) data. 11. Provides [monitoring](/operation/monitor) on running instances of TDengine. diff --git a/docs/en/05-get-started/_pkg_install.mdx b/docs/en/05-get-started/_pkg_install.mdx index cf10497c96ba1d777e45340b0312d97c127b6fcb..2d514d6cd22b94cbe3da8e833d9f5f9f24da733f 100644 --- a/docs/en/05-get-started/_pkg_install.mdx +++ b/docs/en/05-get-started/_pkg_install.mdx @@ -10,8 +10,6 @@ Between two major release versions, some beta versions may be delivered for user -For the details please refer to [Install and Uninstall](/operation/pkg-install)。 +For the details please refer to [Install and Uninstall](../13-operation/01-pkg-install.md). To see the details of versions, please refer to [Download List](https://tdengine.com/all-downloads) and [Release Notes](https://github.com/taosdata/TDengine/releases). 
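The features listed above all revolve around SQL access to the data, so a quick way to sanity-check a fresh installation is to run a trivial query through the Python connector. The snippet below is a minimal sketch and not part of the documentation being changed here; it assumes the `taospy` package is installed and a TDengine server is running locally with the default credentials.

```python
import taos  # provided by the taospy package (assumed to be installed)

# Connect to a locally running TDengine server with default credentials (assumption).
conn = taos.connect(host="localhost", user="root", password="taosdata")

cursor = conn.cursor()
cursor.execute("select server_version()")  # trivial smoke-test query
print(cursor.fetchall())                   # prints the server version string

cursor.close()
conn.close()
```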
- - diff --git a/docs/en/05-get-started/index.md b/docs/en/05-get-started/index.md index 9c5a853820487c87a0c9691bb295e5c1aabe3b12..0450d132ddb56e16f2f521887637b3e2096da7dd 100644 --- a/docs/en/05-get-started/index.md +++ b/docs/en/05-get-started/index.md @@ -10,7 +10,7 @@ import AptGetInstall from "./\_apt_get_install.mdx"; ## Quick Install -The full package of TDengine includes the server(taosd), taosAdapter for connecting with third-party systems and providing a RESTful interface, client driver(taosc), command-line program(CLI, taos) and some tools. For the current version, the server taosd and taosAdapter can only be installed and run on Linux systems. In the future taosd and taosAdapter will also be supported on Windows, macOS and other systems. The client driver taosc and TDengine CLI can be installed and run on Windows or Linux. In addition to connectors for multiple languages, TDengine also provides a [RESTful interface](/reference/rest-api) through [taosAdapter](/reference/taosadapter). Prior to version 2.4.0.0, taosAdapter did not exist and the RESTful interface was provided by the built-in HTTP service of taosd. +The full package of TDengine includes the server (taosd), taosAdapter for connecting with third-party systems and providing a RESTful interface, the client driver (taosc), the command-line program (CLI, taos) and some tools. For the current version, the server taosd and taosAdapter can only be installed and run on Linux systems. In the future, taosd and taosAdapter will also be supported on Windows, macOS and other systems. The client driver taosc and the TDengine CLI can be installed and run on Windows or Linux. In addition to connectors for multiple languages, TDengine also provides a [RESTful interface](../14-reference/02-rest-api/02-rest-api.mdx) through [taosAdapter](../14-reference/04-taosadapter.md). Prior to version 2.4.0.0, taosAdapter did not exist and the RESTful interface was provided by the built-in HTTP service of taosd. TDengine supports X64/ARM64/MIPS64/Alpha64 hardware platforms, and will support ARM32, RISC-V and other CPU architectures in the future. @@ -36,7 +36,7 @@ docker exec -it bash Then you can execute the Linux commands and access TDengine. -For detailed steps, please visit [Experience TDengine via Docker](/train-faq/docker)。 +For detailed steps, please visit [Experience TDengine via Docker](../27-train-faq/03-docker.md). :::info Starting from 2.4.0.10,besides taosd,TDengine docker image includes: taos,taosAdapter,taosdump,taosBenchmark,TDinsight, scripts and sample code. Once the TDengine container is started,it will start both taosAdapter and taosd automatically to support RESTful interface. @@ -98,7 +98,7 @@ To manage the TDengine running instance,or execute ad-hoc queries, TDengine pr taos ``` -If it connects to the TDengine server successfully, it will print out the version and welcome message. If it fails, it will print out the error message, please check [FAQ](/train-faq/faq) for trouble shooting connection issue. TDengine CLI's prompt is: +If it connects to the TDengine server successfully, it will print out the version and welcome message. If it fails, it will print out an error message; please check the [FAQ](../27-train-faq/01-faq.md) for troubleshooting connection issues. The TDengine CLI's prompt is: ```cmd taos> @@ -120,7 +120,7 @@ select * from t; Query OK, 2 row(s) in set (0.003128s) ``` -Besides executing SQL commands, system administrators can check running status, add/drop user accounts and manage the running instances. 
TDengine CLI with client driver can be installed and run on either Linux or Windows machines. For more details on CLI, please [check here](../reference/taos-shell/). +Besides executing SQL commands, system administrators can check running status, add/drop user accounts and manage the running instances. TDengine CLI with client driver can be installed and run on either Linux or Windows machines. For more details on CLI, please [check here](../14-reference/08-taos-shell.md). ## Experience the blazing fast speed @@ -134,7 +134,7 @@ This command will create a super table "meters" under database "test". Under "me This command will insert 100 million rows into the database quickly. Time to insert depends on the hardware configuration, it only takes a dozen seconds for a regular PC server. -taosBenchmark provides command-line options and a configuration file to customize the scenarios, like number of tables, number of rows per table, number of columns and more. Please execute `taosBenchmark --help` to list them. For details on running taosBenchmark, please check [reference for taosBenchmark](/reference/taosbenchmark) +taosBenchmark provides command-line options and a configuration file to customize the scenarios, like number of tables, number of rows per table, number of columns and more. Please execute `taosBenchmark --help` to list them. For details on running taosBenchmark, please check [reference for taosBenchmark](../14-reference/05-taosbenchmark.md) ## Experience query speed diff --git a/docs/en/07-develop/04-query-data/index.mdx b/docs/en/07-develop/04-query-data/index.mdx index a212fa9529215fc24c55c95a166cfc1a407359b2..e8e4b5c0ad555c0807af5f50a75afdffc1aaa50c 100644 --- a/docs/en/07-develop/04-query-data/index.mdx +++ b/docs/en/07-develop/04-query-data/index.mdx @@ -48,7 +48,7 @@ Query OK, 2 row(s) in set (0.001100s) To meet the requirements of varied use cases, some special functions have been added in TDengine. Some examples are `twa` (Time Weighted Average), `spread` (The difference between the maximum and the minimum), and `last_row` (the last row). Furthermore, continuous query is also supported in TDengine. -For detailed query syntax please refer to [Select](/taos-sql/select). +For detailed query syntax please refer to [Select](../../12-taos-sql/06-select.md). ## Aggregation among Tables @@ -81,7 +81,7 @@ taos> SELECT count(*), max(current) FROM meters where groupId = 2 and ts > now - Query OK, 1 row(s) in set (0.002136s) ``` -Join queries are only allowed between subtables of the same STable. In [Select](/taos-sql/select), all query operations are marked as to whether they support STables or not. +Join queries are only allowed between subtables of the same STable. In [Select](../../12-taos-sql/06-select.md), all query operations are marked as to whether they support STables or not. ## Down Sampling and Interpolation @@ -128,13 +128,13 @@ In many use cases, it's hard to align the timestamp of the data collected by eac Interpolation can be performed in TDengine if there is no data in a time range. -For more details please refer to [Aggregate by Window](/taos-sql/interval). +For more details please refer to [Aggregate by Window](../../12-taos-sql/12-interval.md). ## Examples ### Query -In the section describing [Insert](/develop/insert-data/sql-writing), a database named `power` is created and some data are inserted into STable `meters`. Below sample code demonstrates how to query the data in this STable. 
+In the section describing [Insert](../03-insert-data/01-sql-writing.mdx), a database named `power` is created and some data are inserted into STable `meters`. Below sample code demonstrates how to query the data in this STable. diff --git a/docs/en/07-develop/07-subscribe.mdx b/docs/en/07-develop/07-subscribe.mdx index 0fde36f6cff912c82ab58e641f0f38021ea1676a..e309a33fc8f2c30c7fe2ab2a21a517029b089ab1 100644 --- a/docs/en/07-develop/07-subscribe.mdx +++ b/docs/en/07-develop/07-subscribe.mdx @@ -28,7 +28,7 @@ taos_consume taos_unsubscribe ``` -For more details about these APIs please refer to [C/C++ Connector](/reference/connector/cpp). Their usage will be introduced below using the use case of meters, in which the schema of STable and subtables from the previous section [Continuous Query](/develop/continuous-query) are used. Full sample code can be found [here](https://github.com/taosdata/TDengine/blob/master/examples/c/subscribe.c). +For more details about these APIs please refer to [C/C++ Connector](/reference/connector/cpp). Their usage will be introduced below using the use case of meters, in which the schema of STable and subtables from the previous section [Continuous Query](../continuous-query) are used. Full sample code can be found [here](https://github.com/taosdata/TDengine/blob/master/examples/c/subscribe.c). If we want to get a notification and take some actions if the current exceeds a threshold, like 10A, from some meters, there are two ways: diff --git a/docs/en/12-taos-sql/12-interval.md b/docs/en/12-taos-sql/12-interval.md index acfb0de0e1521fd8c6a068497a3df7a17941524c..2d5502781081e12b008314a84101fbf4c37effd7 100644 --- a/docs/en/12-taos-sql/12-interval.md +++ b/docs/en/12-taos-sql/12-interval.md @@ -93,7 +93,7 @@ SELECT function_list FROM stb_name ::: -Aggregate by time window is also used in continuous query, please refer to [Continuous Query](/develop/continuous-query). +Aggregate by time window is also used in continuous query, please refer to [Continuous Query](../../develop/continuous-query). ## Examples diff --git a/docs/en/20-third-party/12-jupyterlab.md b/docs/en/20-third-party/12-jupyterlab.md new file mode 100644 index 0000000000000000000000000000000000000000..fbd7e530f0959740c53e48ce1d73d92ce0d6c5c5 --- /dev/null +++ b/docs/en/20-third-party/12-jupyterlab.md @@ -0,0 +1,98 @@ +--- +sidebar_label: JupyterLab +title: Connect JupyterLab to TDengine +--- + +JupyterLab is the next generation of the ubiquitous Jupyter Notebook. In this note we show you how to install the TDengine Python connector to connect to TDengine in JupyterLab. You can then insert data and perform queries against the TDengine instance within JupyterLab. + +## Install JupyterLab +Installing JupyterLab is very easy. Installation instructions can be found at: + +https://jupyterlab.readthedocs.io/en/stable/getting_started/installation.html. + +If you don't feel like clicking on the link here are the instructions. +Jupyter's preferred Python package manager is pip, so we show the instructions for pip. +You can also use **conda** or **pipenv** if you are managing Python environments. +```` +pip install jupyterlab +```` + +For **conda** you can run: +```` +conda install -c conda-forge jupyterlab +```` + +For **pipenv** you can run: +```` +pipenv install jupyterlab +pipenv shell +```` + +## Run JupyterLab +You can start JupyterLab from the command line by running: +```` +jupyter lab +```` +This will automatically launch your default browser and connect to your JupyterLab instance, usually on port 8888. 
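Before installing the TDengine Python connector in the next section, it can help to confirm which Python environment your notebook kernel is actually running in, because the install commands below rely on `sys.prefix` and `sys.executable`. A minimal sketch you could run in a notebook cell:

```python
# Show which interpreter and environment the current Jupyter kernel uses;
# the conda/pip install commands in the next section target this environment.
import sys

print(sys.executable)  # path of the Python interpreter behind this kernel
print(sys.prefix)      # environment prefix used by the conda install command
```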
+ +## Install the TDengine Python connector +You can now install the TDengine Python connector as follows. + +Start a new Python kernel in JupyterLab. + +If using **conda**, run the following: +```` +# Install a conda package in the current Jupyter kernel +import sys +!conda install --yes --prefix {sys.prefix} taospy +```` +If using **pip**, run the following: +```` +# Install a pip package in the current Jupyter kernel +import sys +!{sys.executable} -m pip install taospy +```` + +## Connect to TDengine +You can find detailed examples of using the Python connector in the TDengine documentation. +Once you have installed the TDengine Python connector in your JupyterLab kernel, the process of connecting to TDengine is the same as the one you would use outside of JupyterLab. +Each TDengine instance has a database called "log", which holds monitoring information about the instance. +In the "log" database there is a [supertable](https://docs.tdengine.com/taos-sql/stable/) called "disks_info". + +The structure of this table is as follows: +```` +taos> desc disks_info; + Field | Type | Length | Note | ================================================================================= + ts | TIMESTAMP | 8 | | + datadir_l0_used | FLOAT | 4 | | + datadir_l0_total | FLOAT | 4 | | + datadir_l1_used | FLOAT | 4 | | + datadir_l1_total | FLOAT | 4 | | + datadir_l2_used | FLOAT | 4 | | + datadir_l2_total | FLOAT | 4 | | + dnode_id | INT | 4 | TAG | + dnode_ep | BINARY | 134 | TAG | +Query OK, 9 row(s) in set (0.000238s) +```` + +The code below fetches data from this table into a pandas DataFrame. + +```` +import taos +import pandas + +def sqlQuery(conn): + df: pandas.DataFrame = pandas.read_sql("select * from log.disks_info limit 500", conn) + print(df) + return df + +conn = taos.connect() + +result = sqlQuery(conn) + +print(result) +```` + +TDengine has connectors for various languages, including Node.js, Go and PHP, and there are Jupyter kernels for these languages, which can be found [here](https://github.com/jupyter/jupyter/wiki/Jupyter-kernels). diff --git a/docs/en/25-application/03-immigrate.md b/docs/en/25-application/03-immigrate.md index fe67f973894d460fb017de0e1a2099b8441a4abe..806b996f6d392f68a045906da0c4e4d1536f4179 100644 --- a/docs/en/25-application/03-immigrate.md +++ b/docs/en/25-application/03-immigrate.md @@ -419,11 +419,11 @@ Note that once the installation is complete, do not immediately start the `taosd To ensure that the system can obtain the necessary information for regular operation. Please set the following vital parameters correctly on the server: -FQDN, firstEp, secondEP, dataDir, logDir, tmpDir, serverPort. For the specific meaning and setting requirements of each parameter, please refer to the document "[TDengine Cluster Installation and Management](/cluster/)" +FQDN, firstEp, secondEP, dataDir, logDir, tmpDir, serverPort. For the specific meaning and setting requirements of each parameter, please refer to the document "[TDengine Cluster Installation and Management](../../cluster/)" Follow the same steps to set parameters on the other nodes, start the taosd service, and then add Dnodes to the cluster. -Finally, start `taos` and execute the `show dnodes` command. If you can see all the nodes that have joined the cluster, the cluster building process was successfully completed. For specific operation procedures and precautions, please refer to the document "[TDengine Cluster Installation and Management](/cluster/)". 
+Finally, start `taos` and execute the `show dnodes` command. If you can see all the nodes that have joined the cluster, the cluster building process was successfully completed. For specific operation procedures and precautions, please refer to the document "[TDengine Cluster Installation and Management](../../cluster/)". ## Appendix 4: Super Table Names diff --git a/docs/zh/02-intro.md b/docs/zh/02-intro.md index 191e1cbcc2921b95f7e312cae2a6d84deaff65fb..095ffa037240b533f13bdd8d4f8483b06dc072a6 100644 --- a/docs/zh/02-intro.md +++ b/docs/zh/02-intro.md @@ -3,7 +3,7 @@ title: 产品简介 toc_max_heading_level: 2 --- -TDengine 是一款高性能、分布式、支持 SQL 的时序数据库 (Database),其核心代码,包括集群功能全部开源(开源协议,AGPL v3.0)。TDengine 能被广泛运用于物联网、工业互联网、车联网、IT 运维、金融等领域。除核心的时序数据库 (Database) 功能外,TDengine 还提供[缓存](/develop/cache/)、[数据订阅](/develop/subscribe)、[流式计算](/develop/continuous-query)等大数据平台所需要的系列功能,最大程度减少研发和运维的复杂度。 +TDengine 是一款高性能、分布式、支持 SQL 的时序数据库 (Database),其核心代码,包括集群功能全部开源(开源协议,AGPL v3.0)。TDengine 能被广泛运用于物联网、工业互联网、车联网、IT 运维、金融等领域。除核心的时序数据库 (Database) 功能外,TDengine 还提供[缓存](/develop/cache/)、[数据订阅](../develop/subscribe)、[流式计算](../develop/continuous-query)等大数据平台所需要的系列功能,最大程度减少研发和运维的复杂度。 本章节介绍TDengine的主要功能、竞争优势、适用场景、与其他数据库的对比测试等等,让大家对TDengine有个整体的了解。 @@ -16,9 +16,9 @@ TDengine的主要功能如下: 3. 支持[各种查询](/develop/query-data),包括聚合查询、嵌套查询、降采样查询、插值等 4. 支持[用户自定义函数](/develop/udf) 5. 支持[缓存](/develop/cache),将每张表的最后一条记录缓存起来,这样无需 Redis -6. 支持[连续查询](/develop/continuous-query)(Continuous Query) -7. 支持[数据订阅](/develop/subscribe),而且可以指定过滤条件 -8. 支持[集群](/cluster/),可以通过多节点进行水平扩展,并通过多副本实现高可靠 +6. 支持[连续查询](../develop/continuous-query)(Continuous Query) +7. 支持[数据订阅](../develop/subscribe),而且可以指定过滤条件 +8. 支持[集群](../cluster/),可以通过多节点进行水平扩展,并通过多副本实现高可靠 9. 提供[命令行程序](/reference/taos-shell),便于管理集群,检查系统状态,做即席查询 10. 提供多种数据的[导入](/operation/import)、[导出](/operation/export) 11. 
支持对[TDengine 集群本身的监控](/operation/monitor) diff --git a/docs/zh/05-get-started/_pkg_install.mdx b/docs/zh/05-get-started/_pkg_install.mdx index 83c987af8bcf24a9593105b680d32a0421344d5f..85d6d7b6ede9b3bc126c6f3b3680ad76b5cd1241 100644 --- a/docs/zh/05-get-started/_pkg_install.mdx +++ b/docs/zh/05-get-started/_pkg_install.mdx @@ -10,8 +10,8 @@ TDengine 的安装非常简单,从下载到安装成功仅仅只要几秒钟 -具体的安装方法,请参见[安装包的安装和卸载](/operation/pkg-install)。 +具体的安装方法,请参见[安装包的安装和卸载](../13-operation/01-pkg-install.md)。 -下载其他组件、最新 Beta 版及之前版本的安装包,请点击[这里](https://www.taosdata.com/all-downloads) +下载其他组件、最新 Beta 版及之前版本的安装包,请点击[这里](https://www.taosdata.com/all-downloads)。 -查看 Release Notes, 请点击[这里](https://github.com/taosdata/TDengine/releases) +查看 Release Notes, 请点击[这里](https://github.com/taosdata/TDengine/releases)。 diff --git a/docs/zh/05-get-started/index.md b/docs/zh/05-get-started/index.md index bdb17aef36da48da3b5500ffb733e7946beb4c2a..cc2e5df80e7a9335357d9a8f18db260c6696d3de 100644 --- a/docs/zh/05-get-started/index.md +++ b/docs/zh/05-get-started/index.md @@ -36,7 +36,7 @@ docker exec -it bash 然后就可以执行相关的 Linux 命令操作和访问 TDengine -详细操作方法请参照 [通过 Docker 快速体验 TDengine](/train-faq/docker)。 +详细操作方法请参照 [通过 Docker 快速体验 TDengine](../train-faq/docker)。 :::info 从 2.4.0.10 开始,除 taosd 以外,Docker 镜像还包含:taos、taosAdapter、taosdump、taosBenchmark、TDinsight 安装脚本和示例代码。启动 Docker 容器时,将同时启动 taosAdapter 和 taosd,实现对 RESTful 的支持。 diff --git a/docs/zh/07-develop/04-query-data/index.mdx b/docs/zh/07-develop/04-query-data/index.mdx index 824f36ef2f98aac227bdcaf2016d7be0a2e59328..58b6738f43909b100f59829d74f0e62cdb7ae44a 100644 --- a/docs/zh/07-develop/04-query-data/index.mdx +++ b/docs/zh/07-develop/04-query-data/index.mdx @@ -120,7 +120,7 @@ Query OK, 5 row(s) in set (0.001521s) 如果一个时间间隔里,没有采集的数据,TDengine 还提供插值计算的功能。 -语法规则细节请见 [TAOS SQL 的按时间窗口切分聚合](/taos-sql/interval) 章节。 +语法规则细节请见 [TAOS SQL 的按时间窗口切分聚合](../../taos-sql/interval) 章节。 ## 示例代码 diff --git a/docs/zh/12-taos-sql/12-interval.md b/docs/zh/12-taos-sql/12-interval.md index b0619ea5ce3759e9bca1234b76e2a16176511547..ac273a6d8a85d7758ffece890120b92c40815555 100644 --- a/docs/zh/12-taos-sql/12-interval.md +++ b/docs/zh/12-taos-sql/12-interval.md @@ -93,7 +93,7 @@ SELECT function_list FROM stb_name ::: -时间聚合也常被用于连续查询场景,可以参考文档 [连续查询(Continuous Query)](/develop/continuous-query)。 +时间聚合也常被用于连续查询场景,可以参考文档 [连续查询(Continuous Query)](../../develop/continuous-query)。 ## 示例 diff --git a/docs/zh/21-tdinternal/01-arch.md b/docs/zh/21-tdinternal/01-arch.md index 507ccea6291cd2cdd5d2477e151ee58d2865e0be..f1fd75d4f3ea22dd64cb7dce95170aa3074d3068 100644 --- a/docs/zh/21-tdinternal/01-arch.md +++ b/docs/zh/21-tdinternal/01-arch.md @@ -171,7 +171,7 @@ Vnode 会保持一个数据版本号(version),对内存数据进行持久 3. 在线的虚拟节点数过半,而且有虚拟节点是 slave 的话,该虚拟节点自动成为 master 4. 
对于 2 和 3,如果多个虚拟节点满足成为 master 的要求,那么虚拟节点组的节点列表里,最前面的选为 master -更多的关于数据复制的流程,请见[《TDengine 2.0 数据复制模块设计》](/tdinternal/replica/)。 +更多的关于数据复制的流程,请见[《TDengine 2.0 数据复制模块设计》](../replica/)。 ### 同步复制 diff --git a/docs/zh/21-tdinternal/03-taosd.md b/docs/zh/21-tdinternal/03-taosd.md index 9470311f94a3247bb51db2b2f0d1ad1876d04995..92677e57007128949dd1e9c954870bc7a9b419b8 100644 --- a/docs/zh/21-tdinternal/03-taosd.md +++ b/docs/zh/21-tdinternal/03-taosd.md @@ -94,7 +94,7 @@ TSDB 中存储的元数据包含属于其所在的 vnode 中表的类型,schem 该模块实现数据的多副本复制,包括 vnode 与 mnode 的数据复制,支持异步和同步两种复制方式,以满足 meta data 与时序数据不同复制的需求。因为它为 mnode 与 vnode 共享,系统为 mnode 副本预留了一个特殊的 vgroup ID:1。因此 vnode group 的 ID 是从 2 开始的。 -每个 vnode/mnode 模块实例会有一对应的 sync 模块实例,他们是一一对应的。详细设计请见[TDengine 2.0 数据复制模块设计](/tdinternal/replica/) +每个 vnode/mnode 模块实例会有一对应的 sync 模块实例,他们是一一对应的。详细设计请见[TDengine 2.0 数据复制模块设计](../replica/) ## WAL 模块 diff --git a/docs/zh/25-application/03-immigrate.md b/docs/zh/25-application/03-immigrate.md index d1c9caea099b79494784aa1122e89d7b4d412464..6978d47754b648b6c27ec11cbb9a7278d27ebf47 100644 --- a/docs/zh/25-application/03-immigrate.md +++ b/docs/zh/25-application/03-immigrate.md @@ -407,11 +407,11 @@ TDengine 提供了丰富的帮助文档说明集群安装、部署的诸多方 为确保系统能够正常获取运行的必要信息。请在服务端正确设置以下关键参数: -FQDN、firstEp、secondEP、dataDir、logDir、tmpDir、serverPort。各参数的具体含义及设置的要求,可参见文档《[TDengine 集群安装、管理](/cluster/)》 +FQDN、firstEp、secondEP、dataDir、logDir、tmpDir、serverPort。各参数的具体含义及设置的要求,可参见文档《[TDengine 集群安装、管理](../../cluster/)》 按照相同的步骤,在需要运行的节点上设置参数,并启动 `taosd` 服务,然后添加 Dnode 到集群中。 -最后启动 `taos` 命令行程序,执行命令 `show dnodes`,如果能看到所有的加入集群的节点,那么集群顺利搭建完成。具体的操作流程及注意事项,请参阅文档《[TDengine 集群安装、管理](/cluster/)》 +最后启动 `taos` 命令行程序,执行命令 `show dnodes`,如果能看到所有的加入集群的节点,那么集群顺利搭建完成。具体的操作流程及注意事项,请参阅文档《[TDengine 集群安装、管理](../../cluster/)》 ## 附录 4: 超级表名称 diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index c753ee3160dd452bfa62038d5048ba24c816bdd3..07aff292f8319f1615b24dfe8907af9164ce8bcb 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -282,7 +282,7 @@ void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) { } // if return true, send probe connection msg to sever ok -bool sendProbeConnMsg(SSqlObj* pSql, int64_t stime) { +bool sendProbeConnMsg(SSqlObj* pSql, int64_t stime, bool *pReqOver) { if(stime == 0) { // not start , no need probe tscInfo("PROBE 0x%" PRIx64 " not start, no need probe.", pSql->self); @@ -318,8 +318,9 @@ bool sendProbeConnMsg(SSqlObj* pSql, int64_t stime) { return true; } - bool ret = rpcSendProbe(pSql->rpcRid, pSql->pPrevContext); - tscInfo("PROBE 0x%" PRIx64 " send probe msg, ret=%d rpcRid=0x%" PRIx64, pSql->self, ret, pSql->rpcRid); + bool ret = rpcSendProbe(pSql->rpcRid, pSql->pPrevContext, pReqOver); + if (!(*pReqOver)) + tscInfo("PROBE 0x%" PRIx64 " send probe msg, ret=%d rpcRid=0x%" PRIx64, pSql->self, ret, pSql->rpcRid); return ret; } @@ -335,16 +336,22 @@ void checkBrokenQueries(STscObj *pTscObj) { } bool kill = false; + bool reqOver = false; int32_t numOfSub = pSql->subState.numOfSub; tscInfo("PROBE 0x%" PRIx64 " start checking sql alive, numOfSub=%d sql=%s stime=%" PRId64 " alive=%" PRId64 " rpcRid=0x%" PRIx64 \ ,pSql->self, numOfSub, pSql->sqlstr == NULL ? "" : pSql->sqlstr, pSql->stime, pSql->lastAlive, pSql->rpcRid); if (numOfSub == 0) { // no sub sql - if(!sendProbeConnMsg(pSql, pSql->stime)) { + if(!sendProbeConnMsg(pSql, pSql->stime, &reqOver)) { // need kill tscInfo("PROBE 0x%" PRIx64 " need break link done. 
rpcRid=0x%" PRIx64, pSql->self, pSql->rpcRid); kill = true; } + + if (reqOver) { + // current request is finished over, so upate alive to now + pSql->lastAlive = taosGetTimestampMs(); + } } else { // lock subs pthread_mutex_lock(&pSql->subState.mutex); @@ -354,13 +361,18 @@ void checkBrokenQueries(STscObj *pTscObj) { SSqlObj *pSubSql = pSql->pSubs[i]; if(pSubSql) { tscInfo("PROBE 0x%" PRIx64 " sub sql app is 0x%" PRIx64, pSql->self, pSubSql->self); - if(!sendProbeConnMsg(pSubSql, pSql->stime)) { + if(!sendProbeConnMsg(pSubSql, pSql->stime, &reqOver)) { // need kill tscInfo("PROBE 0x%" PRIx64 " i=%d sub app=0x%" PRIx64 " need break link done. rpcRid=0x%" PRIx64, pSql->self, i, pSubSql->self, pSubSql->rpcRid); kill = true; break; } } + + if (reqOver) { + // current request is finished over, so upate alive to now + pSubSql->lastAlive = taosGetTimestampMs(); + } } } // unlock @@ -448,13 +460,14 @@ int tscSendMsgToServer(SSqlObj *pSql) { } - if(rpcSendRequest(pObj->pRpcObj->pDnodeConn, &pSql->epSet, &rpcMsg, &pSql->rpcRid)) { - if(pSql->cmd.command < TSDB_SQL_HB) + if(rpcSendRequest(pObj->pRpcObj->pDnodeConn, &pSql->epSet, &rpcMsg, &pSql->rpcRid) != BOOL_FALSE) { + if(pSql->cmd.command == TSDB_SQL_SELECT) rpcSaveSendInfo(pSql->rpcRid, &pSql->pPrevContext); return TSDB_CODE_SUCCESS; } - return TSDB_CODE_SUCCESS; + tscError("0x%"PRIx64" rpc send data failed. msg=%s", pSql->self, taosMsg[pSql->cmd.msgType]); + return TSDB_CODE_TSC_SEND_DATA_FAILED; } // handle three situation diff --git a/src/client/src/tscSub.c b/src/client/src/tscSub.c index e44f88189fb5f7c353228bc26cabee40cf38aff7..b3d18f50114cd88e75e559323fed8c7d6f934289 100644 --- a/src/client/src/tscSub.c +++ b/src/client/src/tscSub.c @@ -149,7 +149,6 @@ static SSub* tscCreateSubscription(STscObj* pObj, const char* topic, const char* goto fail; } - strtolower(pSql->sqlstr, pSql->sqlstr); pRes->qId = 0; pRes->numOfRows = 1; pCmd->resColumnId = TSDB_RES_COL_ID; diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index ab34d99bfc4ba339c5200292d178b96f9b42488a..09574e741fe751a1aa7784d8774fa6334a1f1b2c 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -3296,7 +3296,8 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { // the param may be null, since it may be done by other query threads. and the asyncOnError may enter in this // function while kill query by a user. 
if (param == NULL) { - assert(code != TSDB_CODE_SUCCESS); + if(code != TSDB_CODE_SUCCESS) + tscError("tscRetrieveDataRes param is NULL, error code=%d", code); return; } diff --git a/src/inc/taos.h b/src/inc/taos.h index 5cb0420fe2e8a19d07bc08ebc89fccfc2af968d6..44d83969a8090fc1e98fcec08065dd1834cf3f75 100644 --- a/src/inc/taos.h +++ b/src/inc/taos.h @@ -48,6 +48,12 @@ typedef void **TAOS_ROW; #define TSDB_DATA_TYPE_UBIGINT 14 // 8 bytes #define TSDB_DATA_TYPE_JSON 15 // json string +typedef enum { + BOOL_FALSE = 0, + BOOL_TRUE = 1, + BOOL_ASYNC = 2 //request is processed by async for another thread, not now true or false +} TBOOL; + typedef enum { TSDB_OPTION_LOCALE, TSDB_OPTION_CHARSET, diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index 9f51952d28abb4dc5b589126b6a6358af604de8b..fc387d331b07fcf407ade3305d1874ef2b5c8c31 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -118,6 +118,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_TSC_RES_TOO_MANY TAOS_DEF_ERROR_CODE(0, 0x0227) //"Result set too large to be output") #define TSDB_CODE_TSC_INVALID_SCHEMA_VERSION TAOS_DEF_ERROR_CODE(0, 0x0228) //"invalid table schema version") #define TSDB_CODE_TSC_TOO_MANY_SML_LINES TAOS_DEF_ERROR_CODE(0, 0x0229) //"too many lines in batch") +#define TSDB_CODE_TSC_SEND_DATA_FAILED TAOS_DEF_ERROR_CODE(0, 0x0230) //"Client send request data error" // mnode #define TSDB_CODE_MND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0300) //"Message not processed" diff --git a/src/inc/trpc.h b/src/inc/trpc.h index 59908253c2e825c0d26a27332f3ede5a90aa0060..fe061eb4f20fa550299316c028d99cdd9d6e8bd7 100644 --- a/src/inc/trpc.h +++ b/src/inc/trpc.h @@ -85,7 +85,7 @@ void rpcClose(void *); void *rpcMallocCont(int contLen); void rpcFreeCont(void *pCont); void *rpcReallocCont(void *ptr, int contLen); -bool rpcSendRequest(void *thandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg, int64_t *rid); +TBOOL rpcSendRequest(void *thandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg, int64_t *rid); void rpcSendResponse(const SRpcMsg *pMsg); void rpcSendRedirectRsp(void *pConn, const SRpcEpSet *pEpSet); int rpcGetConnInfo(void *thandle, SRpcConnInfo *pInfo); @@ -94,7 +94,7 @@ int rpcReportProgress(void *pConn, char *pCont, int contLen); void rpcCancelRequest(int64_t rid); int32_t rpcUnusedSession(void * rpcInfo, bool bLock); // send rpc Refid connection probe alive message -bool rpcSendProbe(int64_t rpcRid, void* pPrevContext); +bool rpcSendProbe(int64_t rpcRid, void* pPrevContext, bool *pReqOver); // after sql request send , save conn info bool rpcSaveSendInfo(int64_t rpcRid, void** ppContext); diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 3edd19e016cd08e2ef1e8f7f3a5dfbc7c3252e79..6ba582138c2aecbad0c0dde95177b67a4a0d72fb 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -1704,7 +1704,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul while (1) { int32_t prevEndPos = (forwardStep - 1) * step + startPos; startPos = getNextQualifiedWindow(pQueryAttr, &nextWin, &pSDataBlock->info, tsCols, binarySearchForKey, prevEndPos); - if (startPos < 0) { + if (startPos < 0 || startPos >= pSDataBlock->info.rows) { break; } diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index bcc759f845e7c0e4c110854953d674f5a917cfcf..6566bf9e79f3f58557a80c037e99027cd19753ba 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -202,7 +202,7 @@ static SRpcConn *rpcAllocateClientConn(SRpcInfo *pRpc); static SRpcConn *rpcAllocateServerConn(SRpcInfo *pRpc, 
SRecvInfo *pRecv); static SRpcConn *rpcGetConnObj(SRpcInfo *pRpc, int sid, SRecvInfo *pRecv); -static bool rpcSendReqToServer(SRpcInfo *pRpc, SRpcReqContext *pContext); +static TBOOL rpcSendReqToServer(SRpcInfo *pRpc, SRpcReqContext *pContext); static void rpcSendQuickRsp(SRpcConn *pConn, int32_t code); static void rpcSendErrorMsgToPeer(SRecvInfo *pRecv, int32_t code); static bool rpcSendMsgToPeer(SRpcConn *pConn, void *data, int dataLen); @@ -394,7 +394,7 @@ void *rpcReallocCont(void *ptr, int contLen) { return start + sizeof(SRpcReqContext) + sizeof(SRpcHead); } -bool rpcSendRequest(void *shandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg, int64_t *pRid) { +TBOOL rpcSendRequest(void *shandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg, int64_t *pRid) { SRpcInfo *pRpc = (SRpcInfo *)shandle; SRpcReqContext *pContext; @@ -1384,7 +1384,7 @@ static void rpcSendErrorMsgToPeer(SRecvInfo *pRecv, int32_t code) { return; } -static bool rpcSendReqToServer(SRpcInfo *pRpc, SRpcReqContext *pContext) { +static TBOOL rpcSendReqToServer(SRpcInfo *pRpc, SRpcReqContext *pContext) { SRpcHead *pHead = rpcHeadFromCont(pContext->pCont); char *msg = (char *)pHead; int msgLen = rpcMsgLenFromCont(pContext->contLen); @@ -1394,8 +1394,9 @@ static bool rpcSendReqToServer(SRpcInfo *pRpc, SRpcReqContext *pContext) { SRpcConn *pConn = rpcSetupConnToServer(pContext); if (pConn == NULL) { pContext->code = terrno; + // in rpcProcessConnError if numOfTry over limit, could call rpcNotifyClient to stop query taosTmrStart(rpcProcessConnError, 1, pContext, pRpc->tmrCtrl); - return false; + return BOOL_ASYNC; } pContext->pConn = pConn; @@ -1436,7 +1437,16 @@ static bool rpcSendReqToServer(SRpcInfo *pRpc, SRpcReqContext *pContext) { taosTmrReset(rpcProcessRetryTimer, tsRpcTimer, pConn, pRpc->tmrCtrl, &pConn->pTimer); rpcUnlockConn(pConn); - return ret; + + if(ret == BOOL_FALSE) { + // try next ip again + pContext->code = terrno; + // in rpcProcessConnError if numOfTry over limit, could call rpcNotifyClient to stop query + taosTmrStart(rpcProcessConnError, 1, pContext, pRpc->tmrCtrl); + return BOOL_ASYNC; + } + + return BOOL_TRUE; } static bool rpcSendMsgToPeer(SRpcConn *pConn, void *msg, int msgLen) { @@ -1478,8 +1488,6 @@ static void rpcProcessConnError(void *param, void *id) { return; } - tDebug("%s %p, connection error happens", pRpc->label, pContext->ahandle); - if (pContext->numOfTry >= pContext->epSet.numOfEps || pContext->msgType == TSDB_MSG_TYPE_FETCH) { rpcMsg.msgType = pContext->msgType+1; rpcMsg.ahandle = pContext->ahandle; @@ -1487,9 +1495,11 @@ static void rpcProcessConnError(void *param, void *id) { rpcMsg.pCont = NULL; rpcMsg.contLen = 0; + tWarn("%s %p, connection error. notify client query over. numOfTry=%d msgType=%d", pRpc->label, pContext->ahandle, pContext->numOfTry, pContext->msgType); rpcNotifyClient(pContext, &rpcMsg); } else { // move to next IP + tWarn("%s %p, connection error. retry to send request again. 
numOfTry=%d msgType=%d", pRpc->label, pContext->ahandle, pContext->numOfTry, pContext->msgType); pContext->epSet.inUse++; pContext->epSet.inUse = pContext->epSet.inUse % pContext->epSet.numOfEps; rpcSendReqToServer(pRpc, pContext); @@ -1805,7 +1815,7 @@ bool doRpcSendProbe(SRpcConn *pConn) { } // send server syn -bool rpcSendProbe(int64_t rpcRid, void* pPrevContext) { +bool rpcSendProbe(int64_t rpcRid, void* pPrevContext, bool *pReqOver) { // return false can kill query bool ret = false; if(rpcRid < 0) { @@ -1828,7 +1838,10 @@ bool rpcSendProbe(int64_t rpcRid, void* pPrevContext) { // conn same if(pContext->pConn == NULL) { - tInfo("PROBE rpcRid=0x%" PRIx64 " connect obj is NULL. ", rpcRid); + tInfo("PROBE rpcRid=0x%" PRIx64 " reqContext->pConn is NULL. The req is finished.", rpcRid); + if (pReqOver) + *pReqOver = true; + ret = true; goto _END; } else if (pContext->pConn != pContext->sendInfo.pConn) { @@ -1850,7 +1863,10 @@ bool rpcSendProbe(int64_t rpcRid, void* pPrevContext) { } // send syn - ret = doRpcSendProbe(pContext->pConn); + if (!doRpcSendProbe(pContext->pConn)) { + tError("PROBE rpcRid=0x%" PRIx64 " fd=%d rpc send probe data error.", rpcRid, fd); + } + ret = true; _END: // put back req context diff --git a/src/util/src/terror.c b/src/util/src/terror.c index 640e842de340b686fcd1bd9bd10ad0850c9d02a1..f66152988ee31543cd559c050fba82d3353aa9fa 100644 --- a/src/util/src/terror.c +++ b/src/util/src/terror.c @@ -125,6 +125,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_PROTOCOL_TYPE, "Invalid line protocol TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_PRECISION_TYPE, "Invalid timestamp precision type") TAOS_DEFINE_ERROR(TSDB_CODE_TSC_RES_TOO_MANY, "Result set too large to be output") TAOS_DEFINE_ERROR(TSDB_CODE_TSC_TOO_MANY_SML_LINES, "Too many lines in batch") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_SEND_DATA_FAILED, "Client send request data failed") // mnode TAOS_DEFINE_ERROR(TSDB_CODE_MND_MSG_NOT_PROCESSED, "Message not processed") diff --git a/tests/pytest/subscribe/singlemeter.py b/tests/pytest/subscribe/singlemeter.py index 879e0a75ebdf29022990b5e2e250370620c74636..ff182f70b57241bbb73d67044e5ff0cc87fb72c5 100644 --- a/tests/pytest/subscribe/singlemeter.py +++ b/tests/pytest/subscribe/singlemeter.py @@ -68,8 +68,24 @@ class TDTestCase: tdSub.consume() tdSub.checkRows(11) + + # TS-1788: Subscribe a case sensitive table + tdLog.info("create a table and insert 10 rows.") + tdSql.execute("create table `T1`(ts timestamp, a int, b int);") + for i in range(0, 10): + tdSql.execute("insert into `T1` values (%d, %d, %d);" % (now + i, i, i)) + + sqlstr = "select * from `T1`" + topic = "topic1" + now = int(time.time() * 1000) + + tdSub.init(self.conn.subscribe(True, topic, sqlstr, 0)) + tdSub.consume() + tdSub.checkRows(10) + tdSub.close(True) + def stop(self): tdSub.close(False) tdSql.close()