Commit a2947670 authored by sangshuduo

Merge branch '3.0' into docs/sangshuduo/update-readme

......@@ -18,7 +18,7 @@
Note: Branches that modify documentation should start with `docs/` to avoid triggering unnecessary tests.
4. Create a pull request to merge your branch into the development branch `3.0`; our development team will review it as soon as possible.
If you run into any problems, please add the official WeChat account TDengineECO. Our team will help you resolve them.
If you run into any problems, please add the official WeChat account tdengine1. Our team will help you resolve them.
## Gifts for Contributors
......@@ -48,4 +48,4 @@ The TDengine community is committed to helping more developers understand and use it.
## Contact Us
If you have any issues that need to be resolved or questions that need to be answered, you can add the WeChat account: TDengineECO
If you have any issues that need to be resolved or questions that need to be answered, you can add the WeChat account: tdengine1.
......@@ -60,7 +60,7 @@ sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 li
To build [taos-tools](https://github.com/taosdata/taos-tools) on Ubuntu/Debian, the following packages need to be installed:
```bash
sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-dev zlib1g pkg-config
sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-dev zlib1g pkg-config libgeos-dev
```
### CentOS 7.9
......@@ -85,7 +85,7 @@ sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
```
sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel geos geos-devel
```
#### CentOS 8/Rocky Linux
......@@ -94,7 +94,7 @@ sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson
sudo yum install -y epel-release
sudo yum install -y dnf-plugins-core
sudo yum config-manager --set-enabled powertools
sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel geos geos-devel
```
Note: Since snappy lacks pkg-config support (see [link](https://github.com/google/snappy/pull/86)), cmake will report that libsnappy cannot be found, but it actually works fine.
......
......@@ -68,7 +68,7 @@ sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 li
To build the [taosTools](https://github.com/taosdata/taos-tools) on Ubuntu/Debian, the following packages need to be installed.
```bash
sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-dev zlib1g pkg-config
sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-dev zlib1g pkg-config libgeos-dev
```
### CentOS 7.9
......@@ -91,7 +91,7 @@ sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
#### CentOS 7.9
```
sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel geos geos-devel
```
#### CentOS 8/Rocky Linux
......@@ -100,7 +100,7 @@ sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson
sudo yum install -y epel-release
sudo yum install -y dnf-plugins-core
sudo yum config-manager --set-enabled powertools
sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel geos geos-devel
```
Note: Since snappy lacks pkg-config support (refer to [link](https://github.com/google/snappy/pull/86)), cmake reports that libsnappy cannot be found, but snappy still works well.
......
cmake_minimum_required(VERSION 3.0)
set(CMAKE_VERBOSE_MAKEFILE ON)
set(CMAKE_VERBOSE_MAKEFILE OFF)
set(TD_BUILD_TAOSA_INTERNAL FALSE)
#set output directory
......@@ -119,6 +119,9 @@ ELSE ()
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
MESSAGE(STATUS "Compile with Address Sanitizer!")
ELSEIF (${BUILD_RELEASE})
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
ELSE ()
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
......
......@@ -64,6 +64,13 @@ IF(${TD_WINDOWS})
ON
)
MESSAGE("build geos Win32")
option(
BUILD_GEOS
"If build geos on Windows"
ON
)
ELSEIF (TD_DARWIN_64)
IF(${BUILD_TEST})
add_definitions(-DCOMPILER_SUPPORTS_CXX13)
......@@ -171,3 +178,8 @@ option(
ON
)
option(
BUILD_RELEASE
"If build release version"
OFF
)
......@@ -57,6 +57,8 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin
SET(TD_DARWIN TRUE)
SET(OSTYPE "macOS")
ADD_DEFINITIONS("-DDARWIN -Wno-tautological-pointer-compare")
INCLUDE_DIRECTORIES(/usr/local/include)
LINK_DIRECTORIES(/usr/local/lib)
IF (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm64")
MESSAGE("Current system arch is arm64")
......
......@@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "3.0.4.1")
SET(TD_VER_NUMBER "3.0.4.3")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
......
# geos
ExternalProject_Add(geos
GIT_REPOSITORY https://github.com/libgeos/geos.git
GIT_TAG 3.11.2
SOURCE_DIR "${TD_CONTRIB_DIR}/geos"
BINARY_DIR ""
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)
\ No newline at end of file
# rocksdb
ExternalProject_Add(rocksdb
GIT_REPOSITORY https://github.com/taosdata-contrib/rocksdb.git
GIT_TAG v6.23.3
GIT_REPOSITORY https://github.com/facebook/rocksdb.git
GIT_TAG v8.1.1
SOURCE_DIR "${TD_CONTRIB_DIR}/rocksdb"
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
......
......@@ -2,6 +2,7 @@
# stub
ExternalProject_Add(stub
GIT_REPOSITORY https://github.com/coolxv/cpp-stub.git
GIT_TAG 5e903b8e
GIT_SUBMODULES "src"
SOURCE_DIR "${TD_CONTRIB_DIR}/cpp-stub"
BINARY_DIR "${TD_CONTRIB_DIR}/cpp-stub/src"
......
......@@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
GIT_TAG ae8d51c
GIT_TAG 283b50d
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
......
......@@ -134,6 +134,11 @@ if(${BUILD_ADDR2LINE})
endif(NOT ${TD_WINDOWS})
endif(${BUILD_ADDR2LINE})
# geos
if(${BUILD_GEOS})
cat("${TD_SUPPORT_DIR}/geos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif()
# download dependencies
configure_file(${CONTRIB_TMP_FILE} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
......@@ -470,6 +475,15 @@ if(${BUILD_ADDR2LINE})
endif(NOT ${TD_WINDOWS})
endif(${BUILD_ADDR2LINE})
# geos
if(${BUILD_GEOS})
option(BUILD_SHARED_LIBS "Build GEOS with shared libraries" OFF)
add_subdirectory(geos EXCLUDE_FROM_ALL)
target_include_directories(
geos_c
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/geos/include>
)
endif(${BUILD_GEOS})
# ================================================================================================
# Build test
......
......@@ -4,7 +4,7 @@ if(${BUILD_DOCS})
find_package(Doxygen)
if (DOXYGEN_FOUND)
# Build the doc
set(DOXYGEN_IN ${TD_SOURCE_DIR}/docs/Doxyfile.in)
set(DOXYGEN_IN ${TD_SOURCE_DIR}/docs/doxgen/Doxyfile.in)
set(DOXYGEN_OUT ${CMAKE_BINARY_DIR}/Doxyfile)
configure_file(${DOXYGEN_IN} ${DOXYGEN_OUT} @ONLY)
......
......@@ -5,7 +5,7 @@ description: This website contains the user manuals for TDengine, an open-source
slug: /
---
TDengine is an [open-source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud-native](https://tdengine.com/tdengine/cloud-native-time-series-database/) [time-series database](https://tdengine.com/tsdb/) optimized for the Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design, and other topics. Its written mainly for architects, developers, and system administrators.
TDengine is an [open-source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud-native](https://tdengine.com/tdengine/cloud-native-time-series-database/) [time-series database](https://tdengine.com/tsdb/) optimized for the Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design, and other topics. It's written mainly for architects, developers, and system administrators.
To get an overview of TDengine, such as a feature list, benchmarks, and competitive advantages, please browse through the [Introduction](./intro) section.
......
......@@ -57,7 +57,7 @@ By making full use of [characteristics of time series data](https://tdengine.com
- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengines core modules, including cluster feature, are all available under open source licenses. It has gathered over 19k stars on GitHub. There is an active developer community, and over 140k running instances worldwide.
- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine's core modules, including cluster feature, are all available under open source licenses. It has gathered over 19k stars on GitHub. There is an active developer community, and over 140k running instances worldwide.
With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced.
......@@ -109,8 +109,8 @@ As a high-performance, scalable and SQL supported time-series database, TDengine
| **System Performance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | --------------------------------------------------------------------------------------------------------------------------- |
| Very large total processing capacity | | | √ | TDengines cluster functions can easily improve processing capacity via multi-server coordination. |
| Extremely high-speed data processing | | | √ | TDengines storage and data processing are optimized for IoT, and can process data many times faster than similar products. |
| Very large total processing capacity | | | √ | TDengine's cluster functions can easily improve processing capacity via multi-server coordination. |
| Extremely high-speed data processing | | | √ | TDengine's storage and data processing are optimized for IoT, and can process data many times faster than similar products. |
| Extremely fast processing of high resolution data | | | √ | TDengine has achieved the same or better performance than other relational and NoSQL data processing systems. |
### System Maintenance Requirements
......
......@@ -127,7 +127,7 @@ To make full use of time-series data characteristics, TDengine adopts a strategy
If the metric data of multiple DCPs are traditionally written into a single table, due to uncontrollable network delays, the timing of the data from different DCPs arriving at the server cannot be guaranteed, write operations must be protected by locks, and metric data from one DCP cannot be guaranteed to be continuously stored together. **One table for one data collection point can ensure the best performance of insert and query of a single data collection point to the greatest possible extent.**
TDengine suggests using DCP ID as the table name (like d1001 in the above table). Each DCP may collect one or multiple metrics (like the `current`, `voltage`, `phase` as above). Each metric has a corresponding column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the timestamp as the index, and wont build the index on any metrics stored. Column wise storage is used.
TDengine suggests using DCP ID as the table name (like d1001 in the above table). Each DCP may collect one or multiple metrics (like the `current`, `voltage`, `phase` as above). Each metric has a corresponding column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the timestamp as the index, and won't build the index on any metrics stored. Column wise storage is used.
Complex devices, such as connected cars, may have multiple DCPs. In this case, multiple tables are created for a single device, one table per DCP.
......
......@@ -12,4 +12,4 @@ When using REST connection, the feature of bulk pulling can be enabled if the si
{{#include docs/examples/java/src/main/java/com/taos/example/WSConnectExample.java:main}}
```
More configuration about connectionplease refer to [Java Connector](/reference/connector/java)
More configuration about connection, please refer to [Java Connector](/reference/connector/java)
```php title="native connection"
```php title="native"
{{#include docs/examples/php/connect.php}}
```
......@@ -33,7 +33,7 @@ There are two ways for a connector to establish connections to TDengine:
For REST and native connections, connectors provide similar APIs for performing operations and running SQL statements on your databases. The main difference is the method of establishing the connection, which is not visible to users.
Key differences
Key differences:
3. The REST connection is more accessible with cross-platform support, however it results in a 30% performance downgrade.
1. The TDengine client driver (taosc) has the highest performance with all the features of TDengine like [Parameter Binding](/reference/connector/cpp#parameter-binding-api), [Subscription](/reference/connector/cpp#subscription-and-consumption-api), etc.
......@@ -83,7 +83,7 @@ If `maven` is used to manage the projects, what needs to be done is only adding
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.0.0</version>
<version>3.2.1</version>
</dependency>
```
......@@ -198,7 +198,7 @@ The sample code below are based on dotnet6.0, they may need to be adjusted if yo
<TabItem label="R" value="r">
1. Download [taos-jdbcdriver-version-dist.jar](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.0.0/).
2. Install the dependency package `RJDBC`
2. Install the dependency package `RJDBC`:
```R
install.packages("RJDBC")
......@@ -213,7 +213,7 @@ If the client driver (taosc) is already installed, then the C connector is alrea
</TabItem>
<TabItem label="PHP" value="php">
**Download Source Code Package and Unzip**
**Download Source Code Package and Unzip:**
```shell
curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive/refs/tags/v1.0.2.tar.gz \
......@@ -223,13 +223,13 @@ curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive
> Version number `v1.0.2` is only for example, it can be replaced to any newer version, please check available version from [TDengine PHP Connector Releases](https://github.com/Yurunsoft/php-tdengine/releases).
**Non-Swoole Environment**
**Non-Swoole Environment:**
```shell
phpize && ./configure && make -j && make install
```
**Specify TDengine Location**
**Specify TDengine Location:**
```shell
phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 && make -j && make install
......@@ -238,7 +238,7 @@ phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 &&
> `--with-tdengine-dir=` is followed by the TDengine installation location.
> This way is useful in case TDengine location can't be found automatically or macOS.
**Swoole Environment**
**Swoole Environment:**
```shell
phpize && ./configure --enable-swoole && make -j && make install
......
......@@ -69,7 +69,7 @@ For more details please refer to [InfluxDB Line Protocol](https://docs.influxdat
## Query Examples
If you want query the data of `location=California.LosAngeles,groupid=2`here is the query SQL:
If you want to query the data of `location=California.LosAngeles,groupid=2`, here is the query SQL:
```sql
SELECT * FROM meters WHERE location = "California.LosAngeles" AND groupid = 2;
......
......@@ -84,7 +84,7 @@ Query OK, 4 row(s) in set (0.005399s)
## Query Examples
If you want query the data of `location=California.LosAngeles groupid=3`here is the query SQL:
If you want to query the data of `location=California.LosAngeles groupid=3`, here is the query SQL:
```sql
SELECT * FROM `meters.current` WHERE location = "California.LosAngeles" AND groupid = 3;
......
......@@ -97,7 +97,7 @@ Query OK, 2 row(s) in set (0.004076s)
## Query Examples
If you want query the data of "tags": {"location": "California.LosAngeles", "groupid": 1}here is the query SQL:
If you want to query the data of "tags": {"location": "California.LosAngeles", "groupid": 1}, here is the query SQL:
```sql
SELECT * FROM `meters.current` WHERE location = "California.LosAngeles" AND groupid = 3;
......
......@@ -49,7 +49,7 @@ If the data source is Kafka, then the application program is a consumer of Kafka
On the server side, the database configuration parameter `vgroups` needs to be set carefully to maximize system performance. If it's set too low, the system capability can't be fully utilized; if it's set too high, unnecessary resource competition may occur. A normal recommendation for the `vgroups` parameter is two times the number of CPU cores. However, depending on the actual system resources, it may still need to be tuned.
For more configuration parameters, please refer to [Database Configuration](../../../taos-sql/database) and [Server Configuration](../../../reference/config)
For more configuration parameters, please refer to [Database Configuration](../../../taos-sql/database) and [Server Configuration](../../../reference/config).
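As a rough sketch of the recommendation above, the statement below creates a database sized for a 10-core server; the database name `test` and the value 20 are assumptions for illustration, not values taken from this document.

```sql
-- Minimal sketch: vgroups set to roughly 2x the number of CPU cores (10-core server assumed).
CREATE DATABASE test VGROUPS 20;
```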
## Sample Programs
......@@ -98,7 +98,7 @@ The main Program is responsible for:
3. Start reading threads
4. Output writing speed every 10 seconds
The main program provides 4 parameters for tuning
The main program provides 4 parameters for tuning:
1. The number of reading threads, default value is 1
2. The number of writing threads, default value is 2
......@@ -192,7 +192,7 @@ TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"
If you want to launch the sample program on a remote server, please follow below steps:
1. Package the sample programs. Execute below command under directory `TDengine/docs/examples/java`
1. Package the sample programs. Execute below command under directory `TDengine/docs/examples/java`:
```
mvn package
```
......@@ -385,7 +385,7 @@ SQLWriter class encapsulates the logic of composing SQL and writing data. Please
pip3 install faster-fifo
```
3. Click the "Copy" in the above sample programs to copy `fast_write_example.py``sql_writer.py` and `mockdatasource.py`.
3. Click the "Copy" in the above sample programs to copy `fast_write_example.py`, `sql_writer.py`, and `mockdatasource.py`.
4. Execute the program
......
### Python Kafka client
### python Kafka client
For python kafka client, please refer to [kafka client](https://cwiki.apache.org/confluence/display/KAFKA/Clients#Clients-Python). In this document, we use [kafka-python](http://github.com/dpkp/kafka-python).
......@@ -88,7 +88,7 @@ In addition to python's built-in multithreading and multiprocessing library, we
<details>
<summary>kafka_example_consumer</summary>
`kafka_example_consumer` is `consumer`which is responsible for consuming data from kafka and writing it to TDengine.
`kafka_example_consumer` is `consumer`, which is responsible for consuming data from kafka and writing it to TDengine.
```py
{{#include docs/examples/python/kafka_example_consumer.py}}
......
......@@ -20,10 +20,10 @@ import CAsync from "./_c_async.mdx";
## Introduction
SQL is used by TDengine as its query language. Application programs can send SQL statements to TDengine through REST API or connectors. TDengine's CLI `taos` can also be used to execute ad hoc SQL queries. Here is the list of major query functionalities supported by TDengine
SQL is used by TDengine as its query language. Application programs can send SQL statements to TDengine through REST API or connectors. TDengine's CLI `taos` can also be used to execute ad hoc SQL queries. Here is the list of major query functionalities supported by TDengine:
- Query on single column or multiple columns
- Filter on tags or data columns>, <, =, <\>, like
- Filter on tags or data columns: >, <, =, <\>, like
- Grouping of results: `Group By`
- Sorting of results: `Order By`
- Limit the number of results: `Limit/Offset`
- Windowed aggregate queries for time windows (interval), session windows (session), and state windows (state_window)
- Arithmetic on columns of numeric types or aggregate results
......@@ -160,7 +160,7 @@ In the section describing [Insert](/develop/insert-data/sql-writing), a database
:::note
1. With either REST connection or native connection, the above sample code works well.
2. Please note that `use db` can't be used in case of REST connection because it's stateless.
2. Please note that `use db` can't be used in case of REST connection because it's stateless. You can specify the database name by either the REST endpoint's parameter or <db_name>.<table_name> in the SQL command.
:::
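For instance, a fully qualified table name can stand in for `use db` over a REST connection. This is a minimal sketch; the database `power` and table `meters` are assumed names for illustration.

```sql
-- Qualify the table with its database name instead of issuing `use power` first.
SELECT * FROM power.meters LIMIT 5;
```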
......
......@@ -285,10 +285,10 @@ You configure the following parameters when creating a consumer:
| Parameter | Type | Description | Remarks |
| :----------------------------: | :-----: | -------------------------------------------------------- | ------------------------------------------- |
| `td.connect.ip` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection |
| `td.connect.user` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection |
| `td.connect.pass` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection |
| `td.connect.port` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection |
| `td.connect.ip` | string | IP address of the server side | |
| `td.connect.user` | string | User Name | |
| `td.connect.pass` | string | Password | |
| `td.connect.port` | string | Port of the server side | |
| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. |
| `client.id` | string | Client ID | Maximum length: 192. |
| `auto.offset.reset` | enum | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) |
......@@ -325,6 +325,7 @@ Java programs use the following parameters:
| Parameter | Type | Description | Remarks |
| ----------------------------- | ------ | ----------------------------------------------------------------------------------------------------------------------------- |
| `td.connect.type` | string | connection type: "jni" means native connection, "ws" means websocket connection, the default is "jni" |
| `bootstrap.servers` | string |Connection address, such as `localhost:6030` |
| `value.deserializer` | string | Value deserializer; to use this method, implement the `com.taosdata.jdbc.tmq.Deserializer` interface or inherit the `com.taosdata.jdbc.tmq.ReferenceDeserializer` type |
| `value.deserializer.encoding` | string | Specify the encoding for string deserialization | |
......@@ -399,22 +400,6 @@ from taos.tmq import Consumer
consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
```
Python programs use the following parameters:
| Parameter | Type | Description | Remarks |
|:---------:|:----:|:-----------:|:-------:|
| `td.connect.ip` | string | Used in establishing a connection||
| `td.connect.user` | string | Used in establishing a connection||
| `td.connect.pass` | string | Used in establishing a connection||
| `td.connect.port` | string | Used in establishing a connection||
| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192 |
| `client.id` | string | Client ID | Maximum length: 192 |
| `msg.with.table.name` | string | Specify whether to deserialize table names from messages | pecify `true` or `false` |
| `enable.auto.commit` | string | Commit automatically | pecify `true` or `false` |
| `auto.commit.interval.ms` | string | Interval for automatic commits, in milliseconds | |
| `auto.offset.reset` | string | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) |
| `enable.heartbeat.background` | string | Backend heartbeat; if enabled, the consumer does not go offline even if it has not polled for a long time | Specify `true` or `false` |
</TabItem>
<TabItem label="Node.JS" value="Node.JS">
......
......@@ -252,9 +252,9 @@ create table battery(ts timestamp, vol1 float, vol2 float, vol3 float, deviceId
```
Create the UDF:
```bash
create aggregate function max_vol as '/root/udf/libmaxvol.so' outputtype binary(64) bufsize 10240 language 'C';
create aggregate function max_vol as '/root/udf/libmaxvol.so' outputtype binary(64) bufsize 10240 language 'C';
```
Use the UDF in the query
Use the UDF in the query:
```bash
select max_vol(vol1,vol2,vol3,deviceid) from battery;
```
......@@ -271,9 +271,9 @@ select max_vol(vol1,vol2,vol3,deviceid) from battery;
## Implement a UDF in Python
Implement the specified interface functions when implementing a UDF in Python.
- implement `process` function for the scalar UDF
- implement `start`, `reduce`, `finish` for the aggregate UDF
- implement `init` for initialization and `destroy` for termination
- implement `process` function for the scalar UDF.
- implement `start`, `reduce`, `finish` for the aggregate UDF.
- implement `init` for initialization and `destroy` for termination.
### Implement a Scalar UDF in Python
......
This diff is collapsed.
......@@ -72,8 +72,8 @@ database_option: {
- 0: The database can contain multiple supertables.
- 1: The database can contain only one supertable.
- STT_TRIGGER: specifies the number of file merges triggered by flushed files. The default is 8, ranging from 1 to 16. For high-frequency scenarios with few tables, it is recommended to use the default configuration or a smaller value for this parameter; For multi-table low-frequency scenarios, it is recommended to configure this parameter with a larger value.
- TABLE_PREFIX The prefix in the table name that is ignored when distributing a table to a vgroup when it's a positive number, or only the prefix is used when distributing a table to a vgroup, the default value is 0; For example, if the table name v30001, then "0001" is used if TSDB_PREFIX is set to 2 but "v3" is used if TSDB_PREFIX is set to -2; It can help you to control the distribution of tables.
- TABLE_SUFFIXThe suffix in the table name that is ignored when distributing a table to a vgroup when it's a positive number, or only the suffix is used when distributing a table to a vgroup, the default value is 0; For example, if the table name v30001, then "v300" is used if TSDB_SUFFIX is set to 2 but "01" is used if TSDB_SUFFIX is set to -2; It can help you to control the distribution of tables.
- TABLE_PREFIX: The prefix in the table name that is ignored when distributing a table to a vgroup when it's a positive number, or only the prefix is used when distributing a table to a vgroup, the default value is 0; For example, if the table name v30001, then "0001" is used if TSDB_PREFIX is set to 2 but "v3" is used if TSDB_PREFIX is set to -2; It can help you to control the distribution of tables.
- TABLE_SUFFIX: The suffix in the table name that is ignored when distributing a table to a vgroup when it's a positive number, or only the suffix is used when distributing a table to a vgroup, the default value is 0; For example, if the table name v30001, then "v300" is used if TSDB_SUFFIX is set to 2 but "01" is used if TSDB_SUFFIX is set to -2; It can help you to control the distribution of tables.
- TSDB_PAGESIZE: The page size of the data storage engine in a vnode. The unit is KB. The default is 4 KB. The range is 1 to 16384, that is, 1 KB to 16 MB.
- WAL_RETENTION_PERIOD: specifies the maximum time for which WAL files are to be kept for consumption. This parameter is used for data subscription. Enter a time in seconds. The default value is 0. A value of 0 indicates that WAL files are not required to be kept for consumption. Set it to a proper value before creating topics.
- WAL_RETENTION_SIZE: specifies the maximum total size of which WAL files are to be kept for consumption. This parameter is used for data subscription. Enter a size in KB. The default value is 0. A value of 0 indicates that the total size of WAL files to keep for consumption has no upper limit.
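As an illustration of the STT_TRIGGER, TABLE_PREFIX, and TABLE_SUFFIX options described above, here is a minimal sketch; the database name and the chosen values are assumptions for illustration.

```sql
-- Ignore the first 2 characters of table names (e.g. the "v3" in "v30001") when assigning
-- tables to vgroups, and raise the STT trigger for a multi-table, low-frequency workload.
CREATE DATABASE power TABLE_PREFIX 2 STT_TRIGGER 16;
```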
......
......@@ -82,7 +82,7 @@ One or multiple rows can be inserted into multiple tables in a single SQL statem
```sql
INSERT INTO d1001 VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33)
d1002 (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31;
d1002 (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31);
```
## Automatically Create Table When Inserting
......
......@@ -373,7 +373,7 @@ FROM temp_stable t1, temp_stable t2
WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;
```
For sub-table and super table
For sub-table and super table:
```sql
SELECT *
......
......@@ -6,14 +6,14 @@ description: Use Tag Index to Improve Query Performance
## Introduction
Prior to TDengine 3.0.3.0 (excluded)only one index is created by default on the first tag of each super table, but it's not allowed to dynamically create index on any other tags. From version 3.0.30, you can dynamically create index on any tag of any type. The index created automatically by TDengine is still valid. Query performance can benefit from indexes if you use properly.
Prior to TDengine 3.0.3.0 (exclusive), only one index is created by default on the first tag of each super table, and it's not allowed to dynamically create indexes on any other tags. From version 3.0.3.0, you can dynamically create an index on any tag of any type. The index created automatically by TDengine is still valid. Query performance can benefit from indexes if you use them properly.
## Syntax
1. The syntax of creating an index
```sql
CREATE INDEX index_name ON tbl_name (tagColName
CREATE INDEX index_name ON tbl_name (tagColName)
```
In the above statement, `index_name` if the name of the index, `tbl_name` is the name of the super table,`tagColName` is the name of the tag on which the index is being created. `tagColName` can be any type supported by TDengine.
......
......@@ -434,7 +434,7 @@ TO_ISO8601(expr [, timezone])
**More explanations**:
- You can specify a time zone in the following format: [z/Z, +/-hhmm, +/-hh, +/-hh:mm] For example, TO_ISO8601(1, "+00:00").
- You can specify a time zone in the following format: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]. For example, TO_ISO8601(1, "+00:00").
- If the input is a UNIX timestamp, the precision of the returned value is determined by the digits of the input timestamp.
- If the input is a column of TIMESTAMP type, the precision of the returned value is the same as the precision set for the current database in use.
......@@ -626,7 +626,7 @@ algo_type: {
**Applicable table types**: standard tables and supertables
**Explanations**
**Explanations**:
- _p_ is in range [0,100], when _p_ is 0, the result is same as using function MIN; when _p_ is 100, the result is same as function MAX.
- `algo_type` can only be input as `default` or `t-digest`. Enter `default` to use a histogram-based algorithm. Enter `t-digest` to use the t-digest algorithm to calculate the approximation of the quantile. `default` is used by default.
- The approximation result of `t-digest` algorithm is sensitive to input data order. For example, when querying STable with different input data order there might be minor differences in calculated results.
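A hedged example of the `algo_type` argument, assuming the `meters` super table with a `current` column used elsewhere in these docs.

```sql
-- Approximate 90th percentile of `current`, computed with the t-digest algorithm.
SELECT APERCENTILE(current, 90, "t-digest") FROM meters;
```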
......@@ -672,7 +672,7 @@ If you input a specific column, the number of non-null values in the column is r
ELAPSED(ts_primary_key [, time_unit])
```
**Description**`elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without `INTERVAL` caluse, the returned result is the calculated time length within the specified time range. Please be noted that the return value of `elapsed` is the number of `time_unit` in the calculated time length.
**Description**: The `elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with an `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without an `INTERVAL` clause, the returned result is the calculated time length within the specified time range. Please note that the return value of `elapsed` is the number of `time_unit` in the calculated time length.
**Return value type**: Double if the input value is not NULL;
......@@ -680,7 +680,7 @@ ELAPSED(ts_primary_key [, time_unit])
**Applicable tables**: table, STable, outer in nested query
**Explanations**
**Explanations**:
- `ts_primary_key` parameter can only be the first column of a table, i.e. timestamp primary key.
- The minimum value of `time_unit` is the time precision of the database. If `time_unit` is not specified, the time precision of the database is used as the default time unit. Time unit specified by `time_unit` can be:
1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks)
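A sketch of `elapsed` combined with an `INTERVAL` clause, assuming the subtable `d1001` used elsewhere in these docs; the time range is an assumption for illustration.

```sql
-- Continuous time length (expressed in seconds) covered by valid data, per 1-hour window.
SELECT ELAPSED(ts, 1s) FROM d1001
  WHERE ts >= '2023-01-01 00:00:00' AND ts < '2023-01-02 00:00:00'
  INTERVAL(1h);
```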
......@@ -758,7 +758,7 @@ SUM(expr)
HYPERLOGLOG(expr)
```
**Description**
**Description**:
The cardinal number of a specific column is returned by using hyperloglog algorithm. The benefit of using hyperloglog algorithm is that the memory usage is under control when the data volume is huge.
However, when the data volume is very small, the result may not be accurate; it's recommended to use `select count(data) from (select unique(col) as data from table)` in this case.
......@@ -772,10 +772,10 @@ HYPERLOGLOG(expr)
### HISTOGRAM
```sql
HISTOGRAM(exprbin_type, bin_description, normalized)
HISTOGRAM(expr, bin_type, bin_description, normalized)
```
**Description**Returns count of data points in user-specified ranges.
**Description**: Returns count of data points in user-specified ranges.
**Return value type**: If normalized is set to 1, a DOUBLE is returned; otherwise a BIGINT is returned.
......@@ -783,18 +783,18 @@ HISTOGRAM(expr,bin_type, bin_description, normalized)
**Applicable table types**: table, STable
**Explanations**
- bin_type: parameter to indicate the bucket type, valid inputs are: "user_input", "linear_bin", "log_bin"
- bin_description: parameter to describe how to generate bucketscan be in the following JSON formats for each bin_type respectively:
**Explanations**:
- bin_type: parameter to indicate the bucket type, valid inputs are: "user_input", "linear_bin", "log_bin".
- bin_description: parameter to describe how to generate buckets can be in the following JSON formats for each bin_type respectively:
- "user_input": "[1, 3, 5, 7]":
User specified bin values.
- "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}"
"start" - bin starting point. "width" - bin offset. "count" - number of bins generated. "infinity" - whether to add(-inf, inf)as start/end point in generated set of bins.
"start" - bin starting point. "width" - bin offset. "count" - number of bins generated. "infinity" - whether to add (-inf, inf) as start/end point in generated set of bins.
The above "linear_bin" descriptor generates a set of bins: [-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf].
- "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}"
"start" - bin starting point. "factor" - exponential factor of bin offset. "count" - number of bins generated. "infinity" - whether to add(-inf, inf)as start/end point in generated range of bins.
"start" - bin starting point. "factor" - exponential factor of bin offset. "count" - number of bins generated. "infinity" - whether to add (-inf, inf) as start/end point in generated range of bins.
The above "log_bin" descriptor generates a set of bins: [-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf].
- normalized: setting to 1/0 to turn on/off result normalization. Valid values are 0 or 1.
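A hedged example of the `user_input` bucket type, assuming the `meters` super table with a `voltage` column; the bucket boundaries are assumptions for illustration.

```sql
-- Count voltage readings falling into the buckets [205,210), [210,220) and [220,235);
-- normalized = 0 returns raw counts rather than proportions.
SELECT HISTOGRAM(voltage, "user_input", "[205, 210, 220, 235]", 0) FROM meters;
```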
......@@ -867,10 +867,16 @@ FIRST(expr)
### INTERP
```sql
INTERP(expr)
INTERP(expr [, ignore_null_values])
ignore_null_values: {
0
| 1
}
```
**Description**: The value that matches the specified timestamp range is returned, if existing; or an interpolation value is returned.
**Description**: The value that matches the specified timestamp range is returned, if existing; or an interpolation value is returned. The value of `ignore_null_values` can be 0 or 1, 1 means null values are ignored. The default value of this parameter is 0.
**Return value type**: Same as the column being operated upon
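A sketch of `INTERP` with the usual `RANGE`/`EVERY`/`FILL` clauses, assuming the subtable `d1001`; the time range is an assumption for illustration.

```sql
-- Interpolate `current` every 5 seconds over a 1-minute range, filling gaps linearly;
-- the second argument (1) asks INTERP to ignore NULL values.
SELECT INTERP(current, 1) FROM d1001
  RANGE('2023-01-01 00:00:00', '2023-01-01 00:01:00')
  EVERY(5s)
  FILL(LINEAR);
```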
......@@ -1107,7 +1113,7 @@ ignore_negative: {
**More explanation**:
- It can be used together with `PARTITION BY tbname` against a STable.
- It can be used together with a selected column. For example: select \_rowts, DERIVATIVE() from
- It can be used together with a selected column. For example: select \_rowts, DERIVATIVE() from.
### DIFF
......@@ -1131,7 +1137,7 @@ ignore_negative: {
**More explanation**:
- The number of result rows is the number of rows subtracted by one, no output for the first row
- It can be used together with a selected column. For example: select \_rowts, DIFF() from
- It can be used together with a selected column. For example: select \_rowts, DIFF() from.
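Completing the inline example above, a minimal sketch assuming the subtable `d1001` with a `current` column.

```sql
-- Difference between each `current` value and the previous one, shown with its timestamp.
SELECT _rowts, DIFF(current) FROM d1001;
```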
### IRATE
......@@ -1183,7 +1189,7 @@ STATECOUNT(expr, oper, val)
**Applicable parameter values**:
- oper : Can be one of `'LT'` (lower than), `'GT'` (greater than), `'LE'` (lower than or equal to), `'GE'` (greater than or equal to), `'NE'` (not equal to), `'EQ'` (equal to), the value is case insensitive, the value must be in quotes.
- val Numeric types
- val: Numeric types
**Return value type**: Integer
......@@ -1210,7 +1216,7 @@ STATEDURATION(expr, oper, val, unit)
**Applicable parameter values**:
- oper : Can be one of `'LT'` (lower than), `'GT'` (greater than), `'LE'` (lower than or equal to), `'GE'` (greater than or equal to), `'NE'` (not equal to), `'EQ'` (equal to), the value is case insensitive, the value must be in quotes.
- val Numeric types
- val: Numeric types
- unit: The unit of time interval. Enter one of the following options: 1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks) If you do not enter a unit of time, the precision of the current database is used by default.
**Return value type**: Integer
......
---
sidebar_label: Geometry Functions
title: Geometry Functions
toc_max_heading_level: 4
---
## Geometry Input Functions
Geometry input functions create geometry data from WKT (Well-Known Text).
### ST_GeomFromText
```sql
ST_GeomFromText(VARCHAR WKT expr)
```
**Description**: Return a specified GEOMETRY value from Well-Known Text representation (WKT).
**Return value type**: GEOMETRY
**Applicable data types**: VARCHAR
**Applicable table types**: standard tables and supertables
**Explanations**
- The input can be one of the WKT strings, like POINT, LINESTRING, POLYGON, MULTIPOINT, MULTILINESTRING, MULTIPOLYGON, GEOMETRYCOLLECTION.
- The output is a GEOMETRY data type, internal defined as binary string.
## Geometry Output Functions
Geometry output functions convert geometry data into WKT.
### ST_AsText
```sql
ST_AsText(GEOMETRY geom)
```
**Description**: Return a specified Well-Known Text representation (WKT) value from GEOMETRY data.
**Return value type**: VARCHAR
**Applicable data types**: GEOMETRY
**Applicable table types**: standard tables and supertables
**Explanations**
- The output can be one of the WKT strings, like POINT, LINESTRING, POLYGON, MULTIPOINT, MULTILINESTRING, MULTIPOLYGON, GEOMETRYCOLLECTION.
## Geometry Relationships Functions
Geometry relationships functions determine spatial relationships between geometries.
### ST_Intersects
```sql
ST_Intersects(GEOMETRY geomA, GEOMETRY geomB)
```
**Description**: Compares two geometries and returns true if they intersect.
**Return value type**: BOOL
**Applicable data types**: GEOMETRY, GEOMETRY
**Applicable table types**: standard tables and supertables
**Explanations**
- Geometries intersect if they have any point in common.
### ST_Equals
```sql
ST_Equals(GEOMETRY geomA, GEOMETRY geomB)
```
**Description**: Returns TRUE if the given geometries are "spatially equal".
**Return value type**: BOOL
**Applicable data types**: GEOMETRY, GEOMETRY
**Applicable table types**: standard tables and supertables
**Explanations**
- 'Spatially equal' means ST_Contains(A,B) = true and ST_Contains(B,A) = true, and the ordering of points can be different but represent the same geometry structure.
### ST_Touches
```sql
ST_Touches(GEOMETRY geomA, GEOMETRY geomB)
```
**Description**: Returns TRUE if A and B intersect, but their interiors do not intersect.
**Return value type**: BOOL
**Applicable data types**: GEOMETRY, GEOMETRY
**Applicable table types**: standard tables and supertables
**Explanations**
- A and B have at least one point in common, and the common points lie in at least one boundary.
- For Point/Point inputs the relationship is always FALSE, since points do not have a boundary.
### ST_Covers
```sql
ST_Covers(GEOMETRY geomA, GEOMETRY geomB)
```
**Description**: Returns TRUE if every point in Geometry B lies inside (intersects the interior or boundary of) Geometry A.
**Return value type**: BOOL
**Applicable data types**: GEOMETRY, GEOMETRY
**Applicable table types**: standard tables and supertables
**Explanations**
- A covers B means no point of B lies outside (in the exterior of) A.
### ST_Contains
```sql
ST_Contains(GEOMETRY geomA, GEOMETRY geomB)
```
**Description**: Returns TRUE if geometry A contains geometry B.
**Return value type**: BOOL
**Applicable data types**: GEOMETRY, GEOMETRY
**Applicable table types**: standard tables and supertables
**Explanations**
- A contains B if and only if all points of B lie inside (i.e. in the interior or boundary of) A (or equivalently, no points of B lie in the exterior of A), and the interiors of A and B have at least one point in common.
### ST_ContainsProperly
```sql
ST_ContainsProperly(GEOMETRY geomA, GEOMETRY geomB)
```
**Description**: Returns TRUE if every point of B lies inside A.
**Return value type**: BOOL
**Applicable data types**: GEOMETRY, GEOMETRY
**Applicable table types**: standard tables and supertables
**Explanations**
- No point of B lies in the boundary or exterior of A.
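A short end-to-end sketch of the functions above; the table name, the GEOMETRY column length, and the sample coordinates are assumptions for illustration.

```sql
-- Store a point as GEOMETRY, read it back as WKT, and test a spatial relationship.
CREATE TABLE geo (ts TIMESTAMP, g GEOMETRY(512));
INSERT INTO geo VALUES (NOW, 'POINT(3.0 6.0)');
SELECT ST_AsText(g) FROM geo;
SELECT * FROM geo WHERE ST_Intersects(g, ST_GeomFromText('POINT(3.0 6.0)'));
```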
......@@ -69,19 +69,20 @@ These pseudocolumns occur after the aggregation clause.
`FILL` clause is used to specify how to fill when there is data missing in any window, including:
1. NONE: No fill (the default fill mode)
2. VALUEFill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)` Note: The value filled depends on the data type. For example, if you run FILL(VALUE 1.23) on an integer column, the value 1 is filled.
3. PREVFill with the previous non-NULL value, `FILL(PREV)`
4. NULLFill with NULL, `FILL(NULL)`
5. LINEARFill with the closest non-NULL value, `FILL(LINEAR)`
6. NEXTFill with the next non-NULL value, `FILL(NEXT)`
2. VALUE: Fill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)` Note: The value filled depends on the data type. For example, if you run FILL(VALUE 1.23) on an integer column, the value 1 is filled.
3. PREV: Fill with the previous non-NULL value, `FILL(PREV)`
4. NULL: Fill with NULL, `FILL(NULL)`
5. LINEAR: Fill with the closest non-NULL value, `FILL(LINEAR)`
6. NEXT: Fill with the next non-NULL value, `FILL(NEXT)`
In the above filling modes, except for `NONE` mode, the `fill` clause will be ignored if there is no data in the defined time range, i.e. no data would be filled and the query result would be empty. This behavior is reasonable when the filling mode is `PREV`, `NEXT`, `LINEAR`, because filling can't be performed if there is not any data. For filling modes `NULL` and `VALUE`, however, filling can be performed even though there is not any data, filling or not depends on the choice of user's application. To accomplish the need of this force filling behavior and not break the behavior of existing filling modes, TDengine added two new filling modes since version 3.0.3.0.
1. NULL_F: Fill `NULL` by force
2. VALUE_F: Fill `VALUE` by force
The detailed beaviors of `NULL`, `NULL_F`, `VALUE`, and VALUE_F are described below:
- When used with `INTERVAL`: `NULL_F` and `VALUE_F` are filling by force;`NULL` and `VALUE` don't fill by force. The behavior of each filling mode is exactly same as what the name suggests.
The detailed behaviors of `NULL`, `NULL_F`, `VALUE`, and `VALUE_F` are described below:
- When used with `INTERVAL`: `NULL_F` and `VALUE_F` fill by force; `NULL` and `VALUE` don't fill by force. The behavior of each filling mode is exactly the same as what the name suggests.
- When used with `INTERVAL` in stream processing: `NULL_F` and `NULL` are the same, i.e. they don't fill by force; `VALUE_F` and `VALUE` are the same, i.e. they don't fill by force. It's suggested that there is no filling by force in stream processing.
- When used with `INTERP`: `NULL` and `NULL_F` are the same, i.e. they fill by force; `VALUE` and `VALUE_F` are the same, i.e. they fill by force. It's suggested that there is always filling by force when used with `INTERP`.
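A sketch of forced filling, assuming the `meters` super table with a `current` column; the time range and fill value are assumptions for illustration.

```sql
-- Average current per 10-minute window; with VALUE_F, empty windows are filled with 0
-- even if the whole queried range contains no data, unlike plain FILL(VALUE, 0).
SELECT _wstart, AVG(current) FROM meters
  WHERE ts >= '2023-01-01 00:00:00' AND ts < '2023-01-01 01:00:00'
  INTERVAL(10m)
  FILL(VALUE_F, 0);
```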
......@@ -97,7 +98,7 @@ The detailed beaviors of `NULL`, `NULL_F`, `VALUE`, and VALUE_F are described be
There are two kinds of time windows: sliding window and flip time/tumbling window.
The `INTERVAL` clause is used to generate time windows of the same time interval. The `SLIDING` parameter is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining a continuous query, both the size of the time window and the step of forward sliding time need to be specified. As shown in the figure blow, [t0s, t0e] ,[t1s , t1e], [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. The time step for which time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time/tumbling window.
The `INTERVAL` clause is used to generate time windows of the same time interval. The `SLIDING` parameter is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining a continuous query, both the size of the time window and the step of forward sliding time need to be specified. As shown in the figure below, [t0s, t0e], [t1s, t1e], [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. The time step for which time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is the same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time/tumbling window.
![TDengine Database Time Window](./timewindow-1.webp)
......@@ -121,7 +122,7 @@ Please note that the `timezone` parameter should be configured to be the same va
### State Window
In case of using integer, bool, or string to represent the status of a device at any given moment, continuous rows with the same status belong to a status window. Once the status changes, the status window closes. As shown in the following figure, there are two state windows according to status, [2019-04-28 14:22:07,2019-04-28 14:22:10] and [2019-04-28 14:22:11,2019-04-28 14:22:12].
In case of using integer, bool, or string to represent the status of a device at any given moment, continuous rows with the same status belong to a status window. Once the status changes, the status window closes. As shown in the following figure, there are two state windows according to status, [2019-04-28 14:22:07, 2019-04-28 14:22:10] and [2019-04-28 14:22:11, 2019-04-28 14:22:12].
![TDengine Database Status Window](./timewindow-3.webp)
......@@ -145,7 +146,7 @@ SELECT tbname, _wstart, CASE WHEN voltage >= 205 and voltage <= 235 THEN 1 ELSE
### Session Window
The primary key, i.e. timestamp, is used to determine which session window a row belongs to. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitutes 2 time windows, [2019-04-28 14:22:10,2019-04-28 14:22:30] and [2019-04-28 14:23:10,2019-04-28 14:23:30] because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds.
The primary key, i.e. timestamp, is used to determine which session window a row belongs to. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitute 2 time windows, [2019-04-28 14:22:10, 2019-04-28 14:22:30] and [2019-04-28 14:23:10, 2019-04-28 14:23:30], because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds.
![TDengine Database Session Window](./timewindow-2.webp)
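A sketch matching the 12-second tolerance described above, assuming the subtable `d1001` used elsewhere in these docs.

```sql
-- Rows whose timestamps are within 12 seconds of the previous row fall into the same session.
SELECT _wstart, COUNT(*) FROM d1001 SESSION(ts, 12s);
```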
......@@ -178,7 +179,7 @@ select _wstart, _wend, count(*) from t event_window start with c1 > 0 end with c
### Examples
A table of intelligent meters can be created by the SQL statement below
A table of intelligent meters can be created by the SQL statement below:
```
CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
......
......@@ -112,7 +112,7 @@ SHOW STREAMS;
When you create a stream, you can use the TRIGGER parameter to specify triggering conditions for it.
For non-windowed processing, triggering occurs in real time. For windowed processing, there are three methods of triggeringthe default value is AT_ONCE:
For non-windowed processing, triggering occurs in real time. For windowed processing, there are three methods of triggering, the default value is AT_ONCE:
1. AT_ONCE: triggers on write
......
......@@ -67,7 +67,7 @@ description: This document describes the JSON data type in TDengine.
- The maximum length of keys in JSON is 256 bytes, and key must be printable ASCII characters. The maximum total length of a JSON is 4,096 bytes.
- JSON format
- JSON format:
- The input string for JSON can be empty, i.e. "", "\t", or NULL, but it can't be non-NULL string, bool or array.
- object can be {}, and the entire JSON is empty if so. Key can be "", and it's ignored if so.
......
......@@ -20,7 +20,7 @@ description: This document describes the usage of escape characters in TDengine.
1. If there are escape characters in identifiers (database name, table name, column name)
- Identifier without ``: Error will be returned because identifier must be constituted of digits, ASCII characters or underscore and can't be started with digits
- Identifier quoted with `` Original content is kept, no escaping
- Identifier quoted with ``: Original content is kept, no escaping
2. If there are escape characters in values
- The escape characters will be escaped as the above table. If the escape character doesn't match any supported one, the escape character "\" will be ignored.
- "%" and "\_" are used as wildcards in `like`. `\%` and `\_` should be used to represent literal "%" and "\_" in `like`. If `\%` and `\_` are used outside of a `like` context, the evaluation result is "`\%`" and "`\_`", instead of "%" and "\_".
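A hedged example of escaping a `like` wildcard, assuming the `meters` super table with a `location` tag; the pattern is an assumption for illustration.

```sql
-- Match values that literally start with "California_" (escaped underscore),
-- not "California" followed by any single character.
SELECT * FROM meters WHERE location LIKE 'California\_%';
```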
......@@ -184,7 +184,7 @@ Provides information about standard tables and subtables.
## INS_COLUMNS
| # | **Column Name** | **Data Type** | **Description** |
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | ---------------------- |
| 1 | table_name | BINARY(192) | Table name |
| 2 | db_name | BINARY(64) | Database name |
......
......@@ -4,7 +4,7 @@ sidebar_label: SHOW Statement
description: This document describes how to use the SHOW statement in TDengine.
---
`SHOW` command can be used to get brief system information. To get details about metadata, information, and status in the system, please use `select` to query the tables in database `INFORMATION_SCHEMA`.
`SHOW` command can be used to get brief system information. To get details about metadata, information, and status in the system, please use `select` to query the tables in database `INFORMATION_SCHEMA`.
## SHOW APPS
......@@ -187,10 +187,10 @@ SHOW TABLE DISTRIBUTED table_name;
Shows how table data is distributed.
Examples Below is an example of this command to display the block distribution of table `d0` in detailed format.
Examples: Below is an example of this command to display the block distribution of table `d0` in detailed format.
```sql
show table distributed d0\G;
show table distributed d0\G;
```
<details>
......@@ -201,31 +201,31 @@ _block_dist: Total_Blocks=[5] Total_Size=[93.65 KB] Average_size=[18.73 KB] Comp
Total_Blocks : Table `d0` contains total 5 blocks
Total_Size: The total size of all the data blocks in table `d0` is 93.65 KB
Total_Size: The total size of all the data blocks in table `d0` is 93.65 KB
Average_size: The average size of each block is 18.73 KB
Compression_Ratio: The data compression rate is 23.98%
*************************** 2.row ***************************
_block_dist: Total_Rows=[20000] Inmem_Rows=[0] MinRows=[3616] MaxRows=[4096] Average_Rows=[4000]
Total_Rows: Table `d0` contains 20,000 rows
Inmem_Rows The rows still in memory, i.e. not committed in disk, is 0, i.e. none such rows
Inmem_Rows: The rows still in memory, i.e. not committed in disk, is 0, i.e. none such rows
MinRows: The minimum number of rows in a block is 3,616
MinRows: The minimum number of rows in a block is 3,616
MaxRows The maximum number of rows in a block is 4,096B
MaxRows: The maximum number of rows in a block is 4,096B
Average_Rows The average number of rows in a block is 4,000
Average_Rows: The average number of rows in a block is 4,000
*************************** 3.row ***************************
_block_dist: Total_Tables=[1] Total_Files=[2]
Total_Tables: The number of child tables, 1 in this example
Total_Tables: The number of child tables, 1 in this example
Total_Files The number of files storing the table's data, 2 in this example
Total_Files: The number of files storing the table's data, 2 in this example
*************************** 4.row ***************************
......@@ -361,7 +361,7 @@ SHOW VARIABLES;
SHOW DNODE dnode_id VARIABLES;
```
Shows the working configuration of the parameters that must be the same on each node. You can also specify a dnode to show the working configuration for that node.
Shows the working configuration of the parameters that must be the same on each node. You can also specify a dnode to show the working configuration for that node.
## SHOW VGROUPS
......@@ -369,7 +369,7 @@ Shows the working configuration of the parameters that must be the same on each
SHOW [db_name.]VGROUPS;
```
Shows information about all vgroups in the current database.
Shows information about all vgroups in the current database.
## SHOW VNODES
......
......@@ -17,7 +17,7 @@ CREATE [OR REPLACE] FUNCTION function_name AS library_path OUTPUTTYPE output_typ
```
- OR REPLACE: if the UDF exists, the UDF properties are modified
- function_name: The scalar function name to be used in the SQL statement
- LANGUAGE 'C|Python': the programming language of UDF. Now C or Python is supported. If this clause is omitted, C is assumed as the programming language.
- LANGUAGE 'C|Python': the programming language of UDF. Now C or Python (v3.7+) is supported. If this clause is omitted, C is assumed as the programming language.
- library_path: For C programming language, The absolute path of the DLL file including the name of the shared object file (.so). For Python programming language, the absolute path of the Python UDF script. The path must be quoted with single or double quotes.
- output_type: The data type of the results of the UDF.
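A hedged example of the statement described above (the function names and library paths are assumptions for illustration):

```sql
-- C implementation compiled into a shared object
CREATE FUNCTION bit_and AS '/usr/local/taos/udf/libbitand.so' OUTPUTTYPE INT LANGUAGE 'C';
-- Python implementation; LANGUAGE must then be 'Python'
CREATE OR REPLACE FUNCTION myfun AS '/root/udf/myfun.py' OUTPUTTYPE DOUBLE LANGUAGE 'Python';
```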
......
......@@ -13,7 +13,7 @@ Syntax Specifications used in this chapter:
- Information that you input is given in lowercase.
- \[ \] means optional input, excluding [] itself.
- | means one of a few options, excluding | itself.
- means the item prior to it can be repeated multiple times.
- ... means the item prior to it can be repeated multiple times.
To better demonstrate the syntax, usage and rules of TDengine SQL, hereinafter it's assumed that there is a data set of data from electric meters. Each meter collects 3 data measurements: current, voltage, phase. The data model is shown below:
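As a hedged sketch only (the exact schema follows in the document; column and tag names here are assumptions), such a data set is typically modeled as one super table, with each physical meter stored as a subtable:

```sql
CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT)
  TAGS (location BINARY(64), group_id INT);
```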
......
......@@ -22,11 +22,11 @@ wget https://github.com/taosdata/grafanaplugin/raw/master/dashboards/TDinsight.s
chmod +x TDinsight.sh
```
Prepare
Prepare:
1. TDengine Server
- The URL of REST servicefor example `http://localhost:6041` if TDengine is deployed locally
- The URL of REST service: for example `http://localhost:6041` if TDengine is deployed locally
- User name and password
2. Grafana Alert Notification
......@@ -36,7 +36,7 @@ You can use below command to setup Grafana alert notification.
An existing Grafana Notification Channel can be specified with parameter `-E`, the notifier uid of the channel can be obtained by `curl -u admin:admin localhost:3000/api/alert-notifications |jq`
```bash
sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E <notifier uid>
./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E <notifier uid>
```
Launch `TDinsight.sh` with the command above and restart Grafana, then open Dashboard `http://localhost:3000/d/tdinsight`.
......@@ -274,7 +274,7 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|guage|DOUBLE||metric value|
|gauge|DOUBLE||metric value|
|client\_ip|NCHAR|TAG|client ip|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|request\_method|NCHAR|TAG|request method|
......@@ -288,7 +288,7 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|guage|DOUBLE||metric value|
|gauge|DOUBLE||metric value|
|client\_ip|NCHAR|TAG|client ip|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|request\_method|NCHAR|TAG|request method|
......@@ -302,7 +302,7 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|guage|DOUBLE||metric value|
|gauge|DOUBLE||metric value|
|endpoint|NCHAR|TAG|taosadapter endpoint|
### taosadapter\_restful\_http\_request\_summary\_milliseconds table
......@@ -330,7 +330,7 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|guage|DOUBLE||metric value|
|gauge|DOUBLE||metric value|
|endpoint|NCHAR|TAG|taosadapter endpoint|
### taosadapter\_system\_cpu\_percent table
......@@ -340,6 +340,6 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|guage|DOUBLE||mertic value|
|gauge|DOUBLE||metric value|
|endpoint|NCHAR|TAG|taosadapter endpoint|
......@@ -9,13 +9,13 @@ When a TDengine client is unable to access a TDengine server, the network connec
Diagnostics for network connections can be executed between Linux/Windows/macOS.
Diagnostic steps
Diagnostic steps:
1. If the port range to be diagnosed is being occupied by a `taosd` server process, please stop `taosd` first.
2. On the server side, execute command `taos -n server -P <port> -l <pktlen>` to monitor the port range starting from the port specified by `-P` parameter with the role of "server".
3. On the client side, execute command `taos -n client -h <fqdn of server> -P <port> -l <pktlen>` to send a testing package to the specified server and port.
-l <pktlen\> The size of the testing package, in bytes. The value range is [11, 64,000] and default value is 1,000.
-l <pktlen\>: The size of the testing package, in bytes. The value range is [11, 64,000] and default value is 1,000.
Please note that the package length must be same in the above 2 commands executed on server side and client side respectively.
Output of the server side for the example is below:
......
......@@ -83,13 +83,13 @@ For example, `http://h1.taos.com:6041/rest/sql/test` is a URL to `h1.taos.com:60
TDengine supports both Basic authentication and custom authentication mechanisms, and subsequent versions will provide a standard secure digital signature mechanism for authentication.
- authentication information is shown below
- authentication information is shown below:
```text
Authorization: Taosd <TOKEN>
```
- Basic authentication information is shown below
- Basic authentication information is shown below:
```text
Authorization: Basic <TOKEN>
......
......@@ -12,9 +12,9 @@ C/C++ developers can use TDengine's client driver and the C/C++ connector, to de
After TDengine server or client installation, `taos.h` is located at
- Linux:`/usr/local/taos/include`
- Windows:`C:\TDengine\include`
- macOS:`/usr/local/include`
- Linux: `/usr/local/taos/include`
- Windows: `C:\TDengine\include`
- macOS: `/usr/local/include`
The dynamic libraries for the TDengine client driver are located as follows.
......@@ -412,7 +412,8 @@ In addition to writing data using the SQL method or the parameter binding API, w
Note that the timestamp resolution parameter only takes effect when the protocol type is `SML_LINE_PROTOCOL`.
For OpenTSDB's text protocol, timestamp resolution follows its official resolution rules - time precision is determined by the number of characters contained in the timestamp.
schemaless 其他相关的接口
Other schemaless-related interfaces:
- `TAOS_RES *taos_schemaless_insert_with_reqid(TAOS *taos, char *lines[], int numLines, int protocol, int precision, int64_t reqid)`
- `TAOS_RES *taos_schemaless_insert_raw(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision)`
- `TAOS_RES *taos_schemaless_insert_raw_with_reqid(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision, int64_t reqid)`
......
......@@ -970,8 +970,8 @@ TaosConsumer consumer = new TaosConsumer<>(config);
- group.id: Specifies the consumer group that the consumer is in.
- value.deserializer: To deserialize the results, you can inherit `com.taosdata.jdbc.tmq.ReferenceDeserializer` and specify the result set bean. You can also inherit `com.taosdata.jdbc.tmq.Deserializer` and perform custom deserialization based on the SQL result set.
- td.connect.type: Specifies the connection type with TDengine: `jni` or `WebSocket`. The default is `jni`.
- httpConnectTimeoutWebSocket connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using WebSocket type.
- messageWaitTimeoutsocket timeout in milliseconds, the default value is 10000 ms. It only takes effect when using WebSocket type.
- httpConnectTimeout: WebSocket connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using WebSocket type.
- messageWaitTimeout: socket timeout in milliseconds, the default value is 10000 ms. It only takes effect when using WebSocket type.
- For more information, see [Consumer Parameters](../../../develop/tmq).
#### Subscribe to consume data
......@@ -1267,9 +1267,9 @@ The source code of the sample application is under `TDengine/examples/JDBC`:
5. java.lang.NoSuchMethodError: java.nio.ByteBuffer.position(I)Ljava/nio/ByteBuffer; ... taos-jdbcdriver-3.0.1.jar
**Cause**taos-jdbcdriver 3.0.1 is compiled on JDK 11.
**Cause**: taos-jdbcdriver 3.0.1 is compiled on JDK 11.
**Solution** Use taos-jdbcdriver 3.0.2.
**Solution**: Use taos-jdbcdriver 3.0.2.
For additional troubleshooting, see [FAQ](../../../train-faq/faq).
......
......@@ -121,7 +121,7 @@ The parameters are described as follows:
- **username/password**: Username and password used to create connections.
- **host/port**: Specifies the server and port to establish a connection. If you do not specify a hostname or port, native connections default to `localhost:6030` and Websocket connections default to `localhost:6041`.
- **database**: Specify the default database to connect to. It's optional.
- **params**Optional parameters.
- **params**: Optional parameters.
A sample DSN description string is as follows:
......
......@@ -255,7 +255,7 @@ The `connect()` function returns a `taos.TaosConnection` instance. In client-sid
All arguments to the `connect()` function are optional keyword arguments. The supported connection parameters are as follows.
- `url` The URL of taosAdapter REST service. The default is <http://localhost:6041>.
- `url`: The URL of taosAdapter REST service. The default is <http://localhost:6041>.
- `user`: TDengine user name. The default is `root`.
- `password`: TDengine user password. The default is `taosdata`.
- `timeout`: HTTP request timeout. Enter a value in seconds. The default is `socket._GLOBAL_DEFAULT_TIMEOUT`. Usually, no configuration is needed.
......
......@@ -321,18 +321,18 @@ let cursor = conn.cursor();
| package name | version | TDengine version | Description |
|------------------|---------|---------------------|------------------------------------------------------------------|
| @tdengine/client | 3.0.0 | 3.0.0 | Supports TDengine 3.0. Not compatible with TDengine 2.x. |
| td2.0-connector | 2.0.12 | 2.4.x;2.5.x;2.6.x | Fixed cursor.close() bug. |
| td2.0-connector | 2.0.11 | 2.4.x;2.5.x;2.6.x | Supports parameter binding, JSON tags and schemaless interface |
| td2.0-connector | 2.0.10 | 2.4.x;2.5.x;2.6.x | Supports connection management, standard queries, connection queries, system information, and data subscription |
| td2.0-connector | 2.0.12 | 2.4.x; 2.5.x; 2.6.x | Fixed cursor.close() bug. |
| td2.0-connector | 2.0.11 | 2.4.x; 2.5.x; 2.6.x | Supports parameter binding, JSON tags and schemaless interface |
| td2.0-connector | 2.0.10 | 2.4.x; 2.5.x; 2.6.x | Supports connection management, standard queries, connection queries, system information, and data subscription |
### REST Connector
| package name | version | TDengine version | Description |
|----------------------|---------|---------------------|---------------------------------------------------------------------------|
| @tdengine/rest | 3.0.0 | 3.0.0 | Supports TDengine 3.0. Not compatible with TDengine 2.x. |
| td2.0-rest-connector | 1.0.7 | 2.4.x;2.5.x;2.6.x | Removed default port 6041。 |
| td2.0-rest-connector | 1.0.6 | 2.4.x;2.5.x;2.6.x | Fixed affectRows bug with create, insert, update, and alter. |
| td2.0-rest-connector | 1.0.5 | 2.4.x;2.5.x;2.6.x | Support cloud token |
| td2.0-rest-connector | 1.0.3 | 2.4.x;2.5.x;2.6.x | Supports connection management, standard queries, system information, error information, and continuous queries |
| td2.0-rest-connector | 1.0.7 | 2.4.x; 2.5.x; 2.6.x | Removed default port 6041 |
| td2.0-rest-connector | 1.0.6 | 2.4.x; 2.5.x; 2.6.x | Fixed affectRows bug with create, insert, update, and alter. |
| td2.0-rest-connector | 1.0.5 | 2.4.x; 2.5.x; 2.6.x | Support cloud token |
| td2.0-rest-connector | 1.0.3 | 2.4.x; 2.5.x; 2.6.x | Supports connection management, standard queries, system information, error information, and continuous queries |
## API Reference
......
......@@ -165,7 +165,7 @@ The parameters are described as follows:
* **username/password**: Username and password used to create connections.
* **host/port**: Specifies the server and port to establish a connection. Websocket connections default to `localhost:6041`.
* **database**: Specify the default database to connect to. It's optional.
* **params**Optional parameters.
* **params**: Optional parameters.
A sample DSN description string is as follows:
......@@ -279,7 +279,7 @@ ws://localhost:6041/test
| TDengine.Connector | Description |
|--------------------|--------------------------------|
| 3.0.2 | Support .NET Framework 4.5 and above. Support .Net standard 2.0. Nuget package includes dynamic library for WebSocket.|
| 3.0.1 | Support WebSocket and CloudWith function query, insert, and parameter binding|
| 3.0.1 | Support WebSocket and Cloud, with function query, insert, and parameter binding|
| 3.0.0 | Supports TDengine 3.0.0.0. TDengine 2.x is not supported. Added `TDengine.Impl.GetData()` interface to deserialize query results. |
| 1.0.7 | Fixed TDengine.Query() memory leak. |
| 1.0.6 | Fix schemaless bug in 1.0.4 and 1.0.5. |
......
......@@ -8,23 +8,23 @@ description: This document describes the TDengine PHP connector.
The PHP connector relies on the TDengine client driver.
Project Repository<https://github.com/Yurunsoft/php-tdengine>
Project Repository: <https://github.com/Yurunsoft/php-tdengine>
After TDengine client or server is installed, `taos.h` is located at:
- Linux`/usr/local/taos/include`
- Windows`C:\TDengine\include`
- macOS`/usr/local/include`
- Linux: `/usr/local/taos/include`
- Windows: `C:\TDengine\include`
- macOS: `/usr/local/include`
TDengine client driver is located at:
- Linux: `/usr/local/taos/driver/libtaos.so`
- Windows: `C:\TDengine\taos.dll`
- macOS`/usr/local/lib/libtaos.dylib`
- macOS: `/usr/local/lib/libtaos.dylib`
## Supported Platforms
- Windows、Linux、MacOS
- Windows, Linux, and macOS
- PHP >= 7.4
......@@ -44,7 +44,7 @@ Regarding how to install TDengine client driver please refer to [Install Client
### Install php-tdengine
**Download Source Code Package and Unzip**
**Download Source Code Package and Unzip:**
```shell
curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive/refs/tags/v1.0.2.tar.gz \
......@@ -54,13 +54,13 @@ curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive
> Version number `v1.0.2` is only for example, it can be replaced to any newer version, please find available versions in [TDengine PHP Connector Releases](https://github.com/Yurunsoft/php-tdengine/releases).
**Non-Swoole Environment**
**Non-Swoole Environment:**
```shell
phpize && ./configure && make -j && make install
```
**Specify TDengine location**
**Specify TDengine location:**
```shell
phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 && make -j && make install
......@@ -69,7 +69,7 @@ phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 &&
> `--with-tdengine-dir=` is followed by TDengine location.
> It's useful in case the TDengine installation location can't be found automatically, or on macOS.
**Swoole Environment**
**Swoole Environment:**
```shell
phpize && ./configure --enable-swoole && make -j && make install
......
......@@ -62,7 +62,7 @@ The different database framework specifications for various programming language
| **Regular Query** | Support | Support | Support | Support | Support | Support |
| **Parameter Binding** | Not Supported | Not Supported | Support | Support | Not Supported | Support |
| **Subscription (TMQ) ** | Supported | Support | Support | Not Supported | Not Supported | Support |
| **Schemaless** | Supported | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported |
| **Schemaless** | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported |
| **Bulk Pulling (based on WebSocket) ** | Support | Support | Support | Support | Support | Support |
| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported |
......
......@@ -245,7 +245,7 @@ The parameters listed in this section apply to all function modes.
- ** trying_interval ** : Specify the interval between retries when an insert fails. Valid value is a positive number. Only valid when keep trying is enabled. Available with v3.0.9+.
- ** childtable_from and childtable_to ** : specify the child table range to create. The range is [childtable_from, childtable_to).
 
- ** continue_if_fail ** : allows the user to specify the reaction if the insertion fails.
- "continue_if_fail" : "no" // means taosBenchmark will exit if it fails to insert as default reaction behavior.
......
......@@ -149,7 +149,7 @@ curl --no-progress-meter -u admin:admin http://localhost:3000/api/alert-notifica
Use the `uid` value obtained above as `-E` input.
```bash
sudo ./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
```
If you want to monitor multiple TDengine clusters, you need to set up numerous TDinsight dashboards. Setting up non-default TDinsight requires some changes: the `-n` `-i` `-t` options need to be changed to non-default names, and `-N` and `-L` should also be changed if using the built-in SMS alerting feature.
......@@ -233,7 +233,7 @@ After the importing is done, `TDinsight for 3.x` dashboard is available on the p
In the `TDinsight for 3.x` dashboard, choose the database used by taosKeeper to store monitoring data, you can see the monitoring result.
![TDengine Database TDinsight 选择数据库](./assets/select_dashboard_db.webp)
![TDengine Database TDinsight select database](./assets/select_dashboard_db.webp)
## TDinsight dashboard details
......
......@@ -200,11 +200,16 @@ As shown above, select the `TDengine` data source in the `Query` and enter the c
- Group by column name(s): `group by` or `partition by` column names separated by commas. By setting `Group by column name(s)`, the panel can show multi-dimensional data when the SQL statement contains `group by` or `partition by`. For example, it can show data per `dnode_ep` if the SQL is `select _wstart as ts, avg(mem_system), dnode_ep from log.dnodes_info where ts>=$from and ts<=$to partition by dnode_ep interval($interval)` and `Group by column name(s)` is `dnode_ep`.
- Format to: legend format for `group by` or `partition by` queries. For example, with the same SQL statement, `Group by column name(s)` set to `dnode_ep` and `Format to` set to `mem_system_{{dnode_ep}}`, each series legend is labeled with the formatted column value (the full statement is written out below).
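For readability, the query referenced in the two options above can be written out as a standalone statement (copied from the examples; `$from`, `$to` and `$interval` are Grafana template variables):

```sql
select _wstart as ts, avg(mem_system), dnode_ep
from log.dnodes_info
where ts >= $from and ts <= $to
partition by dnode_ep interval($interval)
```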
:::note
Since the REST connection is stateless, the Grafana plugin can use <db_name>.<table_name> in the SQL command to specify the database name.
:::
Follow the default prompt to query the average system memory usage for the specified interval on the server where the current TDengine deployment is located as follows.
![TDengine Database TDinsight plugin create dashboard 2](./grafana/create_dashboard2.webp)
查询每台 TDengine 服务器指定间隔系统内存平均使用量如下.
The following example queries the average system memory usage for the specified interval on each server.
![TDengine Database TDinsight plugin create dashboard 2](./grafana/create_dashboard3.webp)
......@@ -217,7 +222,7 @@ You can install TDinsight dashboard in data source configuration page (like `htt
![TDengine Database Grafana plugine import dashboard](./import_dashboard.webp)
A dashboard for TDengine 2.x has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167)) Check the [TDinsight User Manual](/reference/tdinsight/) for the details.
A dashboard for TDengine 2.x has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167). Check the [TDinsight User Manual](/reference/tdinsight/) for the details.
For more dashboards using TDengine data source, [search here in Grafana](https://grafana.com/grafana/dashboards/?dataSource=tdengine-datasource). Here is a sub list:
......
......@@ -47,7 +47,7 @@ Select "Rule" in the "Rule Engine" on the left and click the "Create" button: !
### Edit SQL fields
Copy SQL bellow and paste it to the SQL edit area
Copy the SQL below and paste it into the SQL edit area:
```sql
SELECT
......@@ -76,7 +76,8 @@ Select "WebHook" and fill in the request URL as the address and port of the serv
### Edit "action"
Edit the resource configuration to add the key/value pairing for Authorization. If you use the default TDengine username and password then the value of key Authorization is:
Edit the resource configuration to add the key/value pairing for Authorization. If you use the default TDengine username and password then the value of key Authorization is:
```
Basic cm9vdDp0YW9zZGF0YQ==
```
......
......@@ -314,7 +314,6 @@ connection.backoff.ms=5000
topic.prefix=tdengine-source-
poll.interval.ms=1000
fetch.max.rows=100
out.format=line
key.converter=org.apache.kafka.connect.storage.StringConverter
value.converter=org.apache.kafka.connect.storage.StringConverter
```
......@@ -353,7 +352,7 @@ confluent local services connect connector load TDengineSourceConnector --config
### View topic data
Use the kafka-console-consumer command-line tool to monitor data in the topic tdengine-source-test. In the beginning, all historical data will be output. After inserting two new data into TDengine, kafka-console-consumer immediately outputs the two new data.
Use the kafka-console-consumer command-line tool to monitor data in the topic tdengine-source-test. In the beginning, all historical data will be output. After two new rows are inserted into TDengine, kafka-console-consumer immediately outputs them. The output is in InfluxDB line protocol format.
````
kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test
......@@ -424,11 +423,13 @@ The following configuration items apply to TDengine Sink Connector and TDengine
### TDengine Source Connector specific configuration
1. `connection.database`: source database name, no default value.
2. `topic.prefix`: topic name prefix after data is imported into kafka. Use `topic.prefix` + `connection.database` name as the full topic name. Defaults to the empty string "".
3. `timestamp.initial`: Data synchronization start time. The format is 'yyyy-MM-dd HH:mm:ss'. Default "1970-01-01 00:00:00".
4. `poll.interval.ms`: Pull data interval, the unit is ms. Default is 1000.
5. `fetch.max.rows`: The maximum number of rows retrieved when retrieving the database. Default is 100.
6. `out.format`: The data format. The value could be line or json. The line represents the InfluxDB Line protocol format, and json represents the OpenTSDB JSON format. Default is `line`.
2. `topic.prefix`: topic name prefix used when importing data into kafka. Its default value is the empty string "".
3. `timestamp.initial`: Data synchronization start time. The format is 'yyyy-MM-dd HH:mm:ss'. If it is not set, importing to Kafka starts from the first/oldest row in the database.
4. `poll.interval.ms`: The time interval for checking newly created tables or removed tables, default value is 1000.
5. `fetch.max.rows`: The maximum number of rows retrieved when retrieving the database, default is 100.
6. `query.interval.ms`: The time range of data read from TDengine in each query, in milliseconds. It should be adjusted according to the rate at which data flows in; the default value is 1000.
7. `topic.per.stable`: If it's set to true, it means one super table in TDengine corresponds to a topic in Kafka, the topic naming rule is `<topic.prefix>-<connection.database>-<stable.name>`; if it's set to false, it means the whole DB corresponds to a topic in Kafka, the topic naming rule is `<topic.prefix>-<connection.database>`.
## Other notes
......
......@@ -10,7 +10,7 @@ TDengine is a high-performance, scalable time-series database that supports SQL.
The TDengine team immediately saw the benefits of using TDengine to process time-series data with Data Studio to analyze it, and they got to work to create a connector for Data Studio.
With the release of the TDengine connector in Data Studio, you can now get even more out of your data. To obtain the connector, first go to the Data Studio Connector Gallery, click Connect to Data, and search for “TDengine”.
With the release of the TDengine connector in Data Studio, you can now get even more out of your data. To obtain the connector, first go to the Data Studio Connector Gallery, click Connect to Data, and search for "TDengine".
![02](gds/gds-02.png.webp)
......@@ -30,8 +30,8 @@ After the connection is established, you can use Data Studio to process your dat
![06](gds/gds-06.png.webp)
In Data Studio, TDengine timestamps and tags are considered dimensions, and all other items are considered metrics. You can create all kinds of custom charts with your data some examples are shown below.
In Data Studio, TDengine timestamps and tags are considered dimensions, and all other items are considered metrics. You can create all kinds of custom charts with your data - some examples are shown below.
![07](gds/gds-07.png.webp)
With the ability to process petabytes of data per day and provide monitoring and alerting in real time, TDengine is a great solution for time-series data management. Now, with the Data Studio connector, we’re sure you’ll be able to gain new insights and obtain even more value from your data.
With the ability to process petabytes of data per day and provide monitoring and alerting in real time, TDengine is a great solution for time-series data management. Now, with the Data Studio connector, we're sure you'll be able to gain new insights and obtain even more value from your data.
......@@ -26,9 +26,9 @@ A complete TDengine system runs on one or more physical nodes. Logically, a comp
**Management node (mnode)**: A virtual logical unit (M in the figure) responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes. At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 3) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). mnode adopts RAFT protocol to guarantee high data availability and high data reliability. Any data operation can only be performed through the Leader in the RAFT group. The first mnode in the mnode RAFT group is created automatically when the first dnode of the cluster is deployed. Other two follower mnodes need to be created through SQL command in TDengine CLI. There can be at most one mnode in a single dnode, and the mnode is identified by the EP of the dnode where it's located. Each dnode can communicate with each other to automatically get the EP of all mnodes.
**Computation node (qnode)** A virtual logical unit (Q in the figure) responsible for executing query and computing tasks including the `show` commands based on system built-in tables. There can be multiple qnodes configured in a TDengine cluster to share the query and computing tasks. A qnode is not coupled with a specific database, that means each qnode can execute the query tasks for multiple databases in parallel. There can be at most one qnode in a single dnode, and the qnode is identified by the EP of the dnode. TDengine client driver can get the list of qnodes through the communication with mnode. If there is no qnode available in the system, query and computing tasks are executed by vnodes. When a query task is executed, according to the execution plan, one or more qnodes may be scheduled by the scheduler to execute the task. qnode can get data from vnode, and send the execution result to other qnodes for further processing. With introducing qnodes, TDengine achieves the separation between storage and computing.
**Computation node (qnode)**: A virtual logical unit (Q in the figure) responsible for executing query and computing tasks including the `show` commands based on system built-in tables. There can be multiple qnodes configured in a TDengine cluster to share the query and computing tasks. A qnode is not coupled with a specific database, that means each qnode can execute the query tasks for multiple databases in parallel. There can be at most one qnode in a single dnode, and the qnode is identified by the EP of the dnode. TDengine client driver can get the list of qnodes through the communication with mnode. If there is no qnode available in the system, query and computing tasks are executed by vnodes. When a query task is executed, according to the execution plan, one or more qnodes may be scheduled by the scheduler to execute the task. qnode can get data from vnode, and send the execution result to other qnodes for further processing. With introducing qnodes, TDengine achieves the separation between storage and computing.
**Stream Processing node (snode)** A virtual logical unit (S in the figure) responsible for stream processing tasks is introduced in TDengine. There can be multiple snodes configured in a TDengine cluster to share the burden of stream processing tasks. snode is not coupled with a specific stream, that means a single snode can execute the tasks of multiple streams. There can be at most one snode in a single dnode, it's identified by the EP of the dnode. mnode schedules available snodes to perform the stream processing tasks. If there is no snode available in the system, stream processing tasks are executed in vnodes.
**Stream Processing node (snode)**: A virtual logical unit (S in the figure) responsible for stream processing tasks is introduced in TDengine. There can be multiple snodes configured in a TDengine cluster to share the burden of stream processing tasks. snode is not coupled with a specific stream, that means a single snode can execute the tasks of multiple streams. There can be at most one snode in a single dnode, it's identified by the EP of the dnode. mnode schedules available snodes to perform the stream processing tasks. If there is no snode available in the system, stream processing tasks are executed in vnodes.
**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed using RAFT protocol. Write operations can only be performed on the leader vnode, and then replicated to follower vnodes, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `replica` when creating a DB, and the default is 1. Using the multiple replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. Virtual node groups are created and managed by the management node, and the management node assigns a system unique ID, aka VGroup ID, to each vgroup. Virtual nodes with the same vnode group ID belong to the same vgroup. If `replica` is set to 1, it means no data replication. The number of replication for a database can be dynamically changed to 3 for high data reliability. Even if a virtual node group is deleted, its ID will not be reused.
......@@ -59,7 +59,7 @@ After obtaining the mnode EP list, the data node initiates the connection. It wi
- Step : Connect to the existing working data node using TDengine CLI, and then add the End Point of the new data node with the command "create dnode"
- Step 2: In the system configuration parameter file `taos.cfg` of the new data node, set the `firstEp` and `secondEp` parameters to the EP of any two data nodes in the existing cluster. If there is only one existing data node in the system, skip parameter `secondEp`. Please refer to the user tutorial for detailed steps. In this way, the cluster will be established step by step.
**Redirection**: Regardless of dnode or TAOSC, the connection to the mnode is initiated first. The mnode is automatically created and maintained by the system, so the user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Because any running dnode maintains the currently running mnode EP List, when receiving a connecting request from the newly started dnode or TAOSC, if its not an mnode itself, it will reply to the connection initiator with the mnode EP List. After receiving this list, TAOSC or the newly started dnode will try to establish the connection again with mnode. When the mnode EP List changes, each data node quickly obtains the latest list and notifies TAOSC through messaging interaction among nodes.
**Redirection**: Regardless of dnode or TAOSC, the connection to the mnode is initiated first. The mnode is automatically created and maintained by the system, so the user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Because any running dnode maintains the currently running mnode EP List, when receiving a connecting request from the newly started dnode or TAOSC, if it's not an mnode itself, it will reply to the connection initiator with the mnode EP List. After receiving this list, TAOSC or the newly started dnode will try to establish the connection again with mnode. When the mnode EP List changes, each data node quickly obtains the latest list and notifies TAOSC through messaging interaction among nodes.
### A Typical Data Writing Process
......@@ -107,7 +107,7 @@ For large-scale data management, to achieve scale-out, it is generally necessary
VNode (Virtual Data Node) is responsible for providing writing, query and computing functions for collected time-series data. To facilitate load balancing, data recovery and support heterogeneous environments, TDengine splits a data node into multiple vnodes according to its computing and storage resources. The management of these vnodes is done automatically by TDengine and is completely transparent to the application.
For a single data collection point, regardless of the amount of data, a vnode (or vnode group, if the number of replicas is greater than 1) has enough computing resource and storage resource to process (if a 16-byte record is generated per second, the original data generated in one year will be less than 0.5 G). So TDengine stores all the data of a table (a data collection point) in one vnode instead of distributing the data to two or more dnodes. Moreover, a vnode can store data from multiple data collection points (tables), and the upper limit of the tables quantity for a vnode is one million. By design, all tables in a vnode belong to the same DB. On a data node, unless specially configured, the number of vnodes owned by a DB will not exceed the number of system cores.
For a single data collection point, regardless of the amount of data, a vnode (or vnode group, if the number of replicas is greater than 1) has enough computing resource and storage resource to process (if a 16-byte record is generated per second, the original data generated in one year will be less than 0.5 G). So TDengine stores all the data of a table (a data collection point) in one vnode instead of distributing the data to two or more dnodes. Moreover, a vnode can store data from multiple data collection points (tables), and the upper limit of the tables' quantity for a vnode is one million. By design, all tables in a vnode belong to the same DB. On a data node, unless specially configured, the number of vnodes owned by a DB will not exceed the number of system cores.
When creating a DB, the system does not allocate resources immediately. However, when creating a table, the system will check if there is an allocated vnode with free tablespace. If so, the table will be created in the vacant vnode immediately. If not, the system will create a new vnode on a dnode from the cluster according to the current workload, and then create the table in it. If there are multiple replicas of a DB, the system does not create only one vnode, but a vgroup (virtual data node group). The system has no limit on the number of vnodes, which is just limited by the computing and storage resources of physical nodes.
......@@ -132,9 +132,9 @@ Leader Vnode uses a writing process as follows:
<center> Figure 3: TDengine Leader writing process </center>
1. Leader vnode receives the application data insertion request, verifies, and moves to next step;
2. Leader vnode will write the original request packet into database log file WAL. If the database configuration parameter `“wal_level”` is set to 1, vnode doesn't invoked fsync. If `wal_level` is set to 2, fsync is invoked according to another database parameter `wal_fsync_period`.
2. Leader vnode will write the original request packet into database log file WAL. If the database configuration parameter `wal_level` is set to 1, vnode doesn't invoke fsync. If `wal_level` is set to 2, fsync is invoked according to another database parameter `wal_fsync_period`.
3. If there are multiple replicas, the leader vnode will forward data packet to follower vnodes in the same virtual node group, and the forwarded packet has a version number with data;
4. Leader vnode Writes the data into memory and add the record to “skip list”;
4. Leader vnode writes the data into memory and adds the record to "skip list";
5. Leader vnode returns a confirmation message to the application, indicating a successful write.
6. If any of Step 2, 3 or 4 fails, the error will directly return to the application.
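As a hedged illustration of where the parameters mentioned in step 2 come from (assuming database options of the same names; the values are examples only), they are set per database at creation time:

```sql
-- REPLICA 3 creates a three-member vgroup; with WAL_LEVEL 2 the WAL is fsync'ed every 3000 ms
CREATE DATABASE power REPLICA 3 WAL_LEVEL 2 WAL_FSYNC_PERIOD 3000;
```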
......@@ -148,7 +148,7 @@ For a follower vnode, the write process as follows:
1. Follower vnode receives a data insertion request forwarded by Leader vnode;
2. The behavior regarding `wal_level` and `wal_fsync_period` in a follower vnode is the same as in the leader vnode.
3. Write into memory and add the record to “skip list”.
3. Write into memory and add the record to "skip list".
Compared with Leader vnode, follower vnode has no forwarding or reply confirmation step. But writing into memory and WAL is exactly the same.
......@@ -156,7 +156,7 @@ Compared with Leader vnode, follower vnode has no forwarding or reply confirmati
Vnode maintains a version number. When memory data is persisted, the version number is also persisted. For each data update operation, whether it is time-series data or metadata, this version number will be increased by one.
When a vnode starts, its role (leader, follower) is uncertain, and the data is in an unsynchronized state. Its necessary to establish TCP connections with other vnodes in the virtual node group and exchange status, including version and its own role. Through the exchange, the system implements a leader-selection process according to standard RAFT protocol.
When a vnode starts, its role (leader, follower) is uncertain, and the data is in an unsynchronized state. It's necessary to establish TCP connections with other vnodes in the virtual node group and exchange status, including version and its own role. Through the exchange, the system implements a leader-selection process according to standard RAFT protocol.
### Synchronous Replication
......@@ -192,7 +192,7 @@ When data is written to disk, the system decides whether to compress the data ba
### Tiered Storage
By default, TDengine saves all data in /var/lib/taos directory, and the data files of each vnode are saved in a different directory under this directory. In order to expand the storage space, minimize the bottleneck of file reading and improve the data throughput rate, TDengine can configure the system parameter “dataDir” to allow multiple mounted hard disks to be used by system at the same time. In addition, TDengine also provides the function of tiered data storage, i.e. storage on different storage media according to the time stamps of data files. For example, the latest data is stored on SSD, the data older than a week is stored on local hard disk, and data older than four weeks is stored on network storage device. This reduces storage costs and ensures efficient data access. The movement of data on different storage media is automatically done by the system and is completely transparent to applications. Tiered storage of data is also configured through the system parameter “dataDir”.
By default, TDengine saves all data in /var/lib/taos directory, and the data files of each vnode are saved in a different directory under this directory. In order to expand the storage space, minimize the bottleneck of file reading and improve the data throughput rate, TDengine can configure the system parameter "dataDir" to allow multiple mounted hard disks to be used by system at the same time. In addition, TDengine also provides the function of tiered data storage, i.e. storage on different storage media according to the time stamps of data files. For example, the latest data is stored on SSD, the data older than a week is stored on local hard disk, and data older than four weeks is stored on network storage device. This reduces storage costs and ensures efficient data access. The movement of data on different storage media is automatically done by the system and is completely transparent to applications. Tiered storage of data is also configured through the system parameter "dataDir".
dataDir format is as follows:
......@@ -202,7 +202,7 @@ dataDir data_path [tier_level]
Where data_path is the folder path of mount point and tier_level is the media storage-tier. The higher the media storage-tier, means the older the data file. Multiple hard disks can be mounted at the same storage-tier, and data files on the same storage-tier are distributed on all hard disks within the tier. TDengine supports up to 3 tiers of storage, so tier_level values are 0, 1, and 2. When configuring dataDir, there must be only one mount path without specifying tier_level, which is called special mount disk (path). The mount path defaults to level 0 storage media and contains special file links, which cannot be removed, otherwise it will have a devastating impact on the written data.
Suppose there is a physical node with six mountable hard disks/mnt/disk1,/mnt/disk2, …,/mnt/disk6, where disk1 and disk2 need to be designated as level 0 storage media, disk3 and disk4 are level 1 storage media, and disk5 and disk6 are level 2 storage media. Disk1 is a special mount disk, you can configure it in/etc/taos/taos.cfg as follows:
Suppose there is a physical node with six mountable hard disks /mnt/disk1, /mnt/disk2, ..., /mnt/disk6, where disk1 and disk2 need to be designated as level 0 storage media, disk3 and disk4 are level 1 storage media, and disk5 and disk6 are level 2 storage media. Disk1 is a special mount disk, you can configure it in /etc/taos/taos.cfg as follows:
```
dataDir /mnt/disk1/taos
......
......@@ -200,7 +200,7 @@ After migrating via DataX, we found that we can significantly improve the effici
### 2. Manual data migration
Suppose you need to use the multi-value model for data writing. In that case, you need to develop a tool to export data from OpenTSDB, confirm which timelines can be merged and imported into the same timeline, and then pass the time to import simultaneously through the SQL statementwritten to the database.
Suppose you need to use the multi-value model for data writing. In that case, you need to develop a tool to export data from OpenTSDB, confirm which timelines can be merged and imported into the same timeline, and then import the data into the database through SQL statements with the corresponding timestamps.
Manual migration of data requires attention to the following two issues:
......@@ -258,7 +258,7 @@ Equivalent function: apercentile
Example:
```sql
Select apercentile(col1, 50, t-digest) from table_name
select apercentile(col1, 50, "t-digest") from table_name
```
Remark:
......
......@@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w
import Release from "/components/ReleaseV3";
## 3.0.4.2
<Release type="tdengine" version="3.0.4.2" />
## 3.0.4.1
<Release type="tdengine" version="3.0.4.1" />
......
......@@ -78,7 +78,8 @@ int printRow(char *str, TAOS_ROW row, TAOS_FIELD *fields, int numFields) {
} break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR: {
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_GEOMETRY: {
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
memcpy(str + len, row[i], charLen);
len += charLen;
......
......@@ -76,7 +76,8 @@ int printRow(char *str, TAOS_ROW row, TAOS_FIELD *fields, int numFields) {
} break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR: {
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_GEOMETRY: {
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
memcpy(str + len, row[i], charLen);
len += charLen;
......
......@@ -92,7 +92,7 @@ TDengine 的主要功能如下:
## 典型适用场景
作为一个高性能、分布式、支持 SQL 的时序数据库(Database),TDengine 的典型适用场景包括但不限于 IoT、工业互联网、车联网、IT 运维、能源、金融证券等领域。需要指出的是,TDengine 是针对时序数据场景设计的专用数据库和专用大数据处理工具,因其充分利用了时序大数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM 等通用型数据。下面本文将对适用场景做更多详细的分析。
作为一个高性能、分布式、支持 SQL 的时序数据库(Time-series Database),TDengine 的典型适用场景包括但不限于 IoT、工业互联网、车联网、IT 运维、能源、金融证券等领域。需要指出的是,TDengine 是针对时序数据场景设计的专用数据库和专用大数据处理工具,因其充分利用了时序大数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM 等通用型数据。下面本文将对适用场景做更多详细的分析。
### 数据源特点和需求
......
......@@ -82,7 +82,7 @@ TDengine 提供了丰富的应用程序开发接口,为了便于用户快速
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.0.0</version>
<version>3.2.1</version>
</dependency>
```
......
......@@ -161,7 +161,7 @@ Query OK, 6 rows in database (0.005515s)
:::note
1. 无论是使用 REST 连接还是原生连接的连接器,以上示例代码都能正常工作。
2. 唯一需要注意的是:由于 REST 接口无状态, 不能使用 `use db` 语句来切换数据库。
2. 唯一需要注意的是:由于 REST 接口无状态, 不能使用 `use db` 语句来切换数据库。除了在 REST 参数中指定数据库以外也可以在 SQL 语句中使用 <db_name>.<table_name> 来指定数据库。
:::
......
......@@ -285,10 +285,10 @@ CREATE TOPIC topic_name AS DATABASE db_name;
| 参数名称 | 类型 | 参数说明 | 备注 |
| :----------------------------: | :-----: | -------------------------------------------------------- | ------------------------------------------- |
| `td.connect.ip` | string | 用于创建连接,同 `taos_connect` | 仅用于建立原生连接 |
| `td.connect.user` | string | 用于创建连接,同 `taos_connect` | 仅用于建立原生连接 |
| `td.connect.pass` | string | 用于创建连接,同 `taos_connect` | 仅用于建立原生连接 |
| `td.connect.port` | integer | 用于创建连接,同 `taos_connect` | 仅用于建立原生连接 |
| `td.connect.ip` | string | 服务端的 IP 地址 | |
| `td.connect.user` | string | 用户名 | |
| `td.connect.pass` | string | 密码 | |
| `td.connect.port` | integer | 服务端的端口号 | |
| `group.id` | string | 消费组 ID,同一消费组共享消费进度 | **必填项**。最大长度:192。 |
| `client.id` | string | 客户端 ID | 最大长度:192。 |
| `auto.offset.reset` | enum | 消费组订阅的初始位置 | <br />`earliest`: default;从头开始订阅; <br/>`latest`: 仅从最新数据开始订阅; <br/>`none`: 没有提交的 offset 无法订阅 |
......@@ -321,10 +321,11 @@ tmq_conf_destroy(conf);
</TabItem>
<TabItem value="java" label="Java">
对于 Java 程序,使用如下配置项:
对于 Java 程序,还可以使用如下配置项:
| 参数名称 | 类型 | 参数说明 |
| ----------------------------- | ------ | ----------------------------------------------------------------------------------------------------------------------------- |
| `td.connect.type` | string | 连接类型,"jni" 指原生连接,"ws" 指 websocket 连接,默认值为 "jni" |
| `bootstrap.servers` | string | 连接地址,如 `localhost:6030` |
| `value.deserializer` | string | 值解析方法,使用此方法应实现 `com.taosdata.jdbc.tmq.Deserializer` 接口或继承 `com.taosdata.jdbc.tmq.ReferenceDeserializer` 类 |
| `value.deserializer.encoding` | string | 指定字符串解析的字符集 | |
......@@ -401,21 +402,6 @@ from taos.tmq import Consumer
consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
```
其中,`configs` 为 dict 类型,传递创建 Consumer 的参数。可以配置的参数有:
| 参数名称 | 类型 | 参数说明 | 备注 |
|:------:|:----:|:-------:|:---:|
| `td.connect.ip` | string | 用于创建连接||
| `td.connect.user` | string | 用于创建连接||
| `td.connect.pass` | string | 用于创建连接||
| `td.connect.port` | string | 用于创建连接||
| `group.id` | string | 消费组 ID,同一消费组共享消费进度 | **必填项**。最大长度:192 |
| `client.id` | string | 客户端 ID | 最大长度:192 |
| `msg.with.table.name` | string | 是否允许从消息中解析表名,不适用于列订阅 | 合法值:`true`, `false` |
| `enable.auto.commit` | string | 启用自动提交 | 合法值:`true`, `false` |
| `auto.commit.interval.ms` | string | 以毫秒为单位的自动提交时间间隔 | 默认值:5000 ms |
| `auto.offset.reset` | string | 消费组订阅的初始位置 | 可选:`earliest`(default), `latest`, `none` |
</TabItem>
<TabItem label="Node.JS" value="Node.JS">
......
......@@ -45,7 +45,7 @@ TDengine 版本更新往往会增加新的功能特性,列表中的连接器
| **连接管理** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
| **普通查询** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
| **参数绑定** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
| **数据订阅(TMQ)** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
| **数据订阅(TMQ)** | 暂不支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
| **Schemaless** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
| **DataFrame** | 不支持 | 支持 | 不支持 | 不支持 | 不支持 | 不支持 |
......
......@@ -869,10 +869,15 @@ FIRST(expr)
### INTERP
```sql
INTERP(expr)
INTERP(expr [, ignore_null_values])
ignore_null_values: {
0
| 1
}
```
**功能说明**:返回指定时间截面指定列的记录值或插值。
**功能说明**:返回指定时间截面指定列的记录值或插值。ignore_null_values 参数的值可以是 0 或 1,为 1 时表示忽略 NULL 值,缺省值为 0。
**返回数据类型**:同字段类型。
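下面是一个示意性的例子(仅供参考,假设存在包含 current 列的超级表 meters;INTERP 需要与 RANGE、EVERY、FILL 子句配合使用):

```sql
-- 在指定时间范围内,每 1 秒返回 current 的线性插值,并忽略 NULL 值
SELECT INTERP(current, 1) FROM meters
  RANGE('2023-01-01 00:00:00', '2023-01-01 00:01:00')
  EVERY(1s)
  FILL(LINEAR);
```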
......
......@@ -38,7 +38,7 @@ CREATE [OR REPLACE] AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE
```
- OR REPLACE: 如果函数已经存在,会修改已有的函数属性。
- function_name:聚合函数未来在 SQL 中被调用时的函数名,必须与函数实现中 udfNormalFunc 的实际名称一致;
- LANGUAGE 'C|Python':函数编程语言,目前支持C语言和Python语言
- LANGUAGE 'C|Python':函数编程语言,目前支持C语言和Python语言(v3.7+)。
- library_path:如果编程语言是C,路径是包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件)。如果编程语言是Python,路径是包含 UDF 函数实现的Python文件路径。这个路径需要用英文单引号或英文双引号括起来;
- output_type:此函数计算结果的数据类型名称;
- buffer_size:中间计算结果的缓冲区大小,单位是字节。如果不使用可以不设置。
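下面是一个创建聚合 UDF 的示意(仅供参考,函数名与库文件路径均为示例假设):

```sql
-- C 实现的聚合函数,中间结果缓冲区为 8 字节
CREATE AGGREGATE FUNCTION l2norm AS '/usr/local/taos/udf/libl2norm.so' OUTPUTTYPE DOUBLE BUFSIZE 8 LANGUAGE 'C';
```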
......
......@@ -177,7 +177,7 @@ Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 syste
假设您在主机 `tdengine` 上启动 TDengine 数据库,HTTP API 端口为 `6041`,用户为 `root1`,密码为 `pass5ord`。执行脚本:
```bash
sudo ./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord
./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord
```
我们提供了一个“-E”选项,用于从命令行配置 TDinsight 使用现有的通知通道(Notification Channel)。假设你的 Grafana 用户和密码是 `admin:admin`,使用以下命令获取已有的通知通道的`uid`:
......@@ -189,7 +189,7 @@ curl --no-progress-meter -u admin:admin http://localhost:3000/api/alert-notifica
使用上面获取的 `uid` 值作为 `-E` 输入。
```bash
sudo ./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
```
如果要监控多个 TDengine 集群,则需要设置多个 TDinsight 仪表盘。设置非默认 TDinsight 需要进行一些更改: `-n` `-i` `-t` 选项需要更改为非默认名称,如果使用 内置短信告警功能,`-N` 和 `-L` 也应该改变。
......
......@@ -32,7 +32,7 @@ chmod +x TDinsight.sh
- 使用已经存在的 Grafana Notification Channel `uid`,参数 `-E`。该参数可以使用 `curl -u admin:admin localhost:3000/api/alert-notifications |jq` 来获取。
```bash
sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E <notifier uid>
./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E <notifier uid>
```
运行程序并重启 Grafana 服务,打开面板:`http://localhost:3000/d/tdinsight`
......@@ -270,7 +270,7 @@ TDinsight dashboard 数据来源于 log 库(存放监控数据的默认db,
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|guage|DOUBLE||监控指标值|
|gauge|DOUBLE||监控指标值|
|client\_ip|NCHAR|TAG|client ip|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|request\_method|NCHAR|TAG|request method|
......@@ -284,7 +284,7 @@ TDinsight dashboard 数据来源于 log 库(存放监控数据的默认db,
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|guage|DOUBLE||监控指标值|
|gauge|DOUBLE||监控指标值|
|client\_ip|NCHAR|TAG|client ip|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|request\_method|NCHAR|TAG|request method|
......@@ -298,7 +298,7 @@ TDinsight dashboard 数据来源于 log 库(存放监控数据的默认db,
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|guage|DOUBLE||监控指标值|
|gauge|DOUBLE||监控指标值|
|endpoint|NCHAR|TAG|taosadapter endpoint|
### taosadapter\_restful\_http\_request\_summary\_milliseconds 表
......@@ -326,7 +326,7 @@ TDinsight dashboard 数据来源于 log 库(存放监控数据的默认db,
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|guage|DOUBLE||监控指标值|
|gauge|DOUBLE||监控指标值|
|endpoint|NCHAR|TAG|taosadapter endpoint|
### taosadapter\_system\_cpu\_percent 表
......@@ -336,5 +336,5 @@ TDinsight dashboard 数据来源于 log 库(存放监控数据的默认db,
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|guage|DOUBLE||监控指标值|
|gauge|DOUBLE||监控指标值|
|endpoint|NCHAR|TAG|taosadapter endpoint|
......@@ -200,6 +200,12 @@ docker run -d \
- Group by column name(s): **半角**逗号分隔的 `group by` 或 `partition by` 列名。如果是 `group by` or `partition by` 查询语句,设置 `Group by` 列,可以展示多维数据。例如:INPUT SQL 为 `select _wstart as ts, avg(mem_system), dnode_ep from log.dnodes_info where ts>=$from and ts<=$to partition by dnode_ep interval($interval)`,设置 Group by 列名为 `dnode_ep`,可以按 `dnode_ep` 展示数据。
- Format to: Group by 或 Partition by 场景下多维数据 legend 格式化格式。例如上述 INPUT SQL,将 Format to 设置为 `mem_system_{{dnode_ep}}`,展示的 legend 名字为格式化的列名。
:::note
由于 REST 接口无状态,不能使用 `use db` 语句来切换数据库。Grafana 插件中的 SQL 语句可以使用 <db_name>.<table_name> 来指定数据库。
:::
按照默认提示查询当前 TDengine 部署所在服务器指定间隔系统内存平均使用量如下:
![TDengine Database Grafana plugin create dashboard](./create_dashboard2.webp)
......
......@@ -318,7 +318,6 @@ connection.backoff.ms=5000
topic.prefix=tdengine-source-
poll.interval.ms=1000
fetch.max.rows=100
out.format=line
key.converter=org.apache.kafka.connect.storage.StringConverter
value.converter=org.apache.kafka.connect.storage.StringConverter
```
......@@ -357,7 +356,7 @@ confluent local services connect connector load TDengineSourceConnector --config
### 查看 topic 数据
使用 kafka-console-consumer 命令行工具监控主题 tdengine-source-test 中的数据。一开始会输出所有历史数据, 往 TDengine 插入两条新的数据之后,kafka-console-consumer 也立即输出了新增的两条数据。
使用 kafka-console-consumer 命令行工具监控主题 tdengine-source-test 中的数据。一开始会输出所有历史数据,往 TDengine 插入两条新的数据之后,kafka-console-consumer 也立即输出了新增的两条数据。输出数据为 InfluxDB line protocol 格式。
```
kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test
......@@ -434,11 +433,12 @@ confluent local services connect connector unload TDengineSourceConnector
### TDengine Source Connector 特有的配置
1. `connection.database`: 源数据库名称,无缺省值。
2. `topic.prefix`: 数据导入 kafka 后 topic 名称前缀。 使用 `topic.prefix` + `connection.database` 名称作为完整 topic 名。默认为空字符串 ""。
3. `timestamp.initial`: 数据同步起始时间。格式为'yyyy-MM-dd HH:mm:ss'。默认为 "1970-01-01 00:00:00"
4. `poll.interval.ms`: 拉取数据间隔,单位为 ms。默认为 1000。
2. `topic.prefix`: 数据导入 kafka 时使用的 topic 名称的前缀。默认为空字符串 ""。
3. `timestamp.initial`: 数据同步起始时间。格式为'yyyy-MM-dd HH:mm:ss',若未指定则从指定 DB 中最早的一条记录开始
4. `poll.interval.ms`: 检查是否有新建或删除的表的时间间隔,单位为 ms。默认为 1000。
5. `fetch.max.rows` : 检索数据库时最大检索条数。 默认为 100。
6. `out.format`: 数据格式。取值 line 或 json。line 表示 InfluxDB Line 协议格式, json 表示 OpenTSDB JSON 格式。默认为 line。
6. `query.interval.ms`: 从 TDengine 一次读取数据的时间跨度,需要根据表中的数据特征合理配置,避免一次查询的数据量过大或过小;在具体的环境中建议通过测试设置一个较优值,默认值为 1000.
7. `topic.per.stable`: 如果设置为true,表示一个超级表对应一个 Kafka topic,topic的命名规则 `<topic.prefix>-<connection.database>-<stable.name>`;如果设置为 false,则指定的 DB 中的所有数据进入一个 Kafka topic,topic 的命名规则为 `<topic.prefix>-<connection.database>`
## 其他说明
......
......@@ -247,4 +247,10 @@ launchctl limit maxfiles
该提示是创建 db 的 vnode 数量不够了,需要的 vnode 不能超过了 dnode 中 vnode 的上限。因为系统默认是一个 dnode 中有 CPU 核数两倍的 vnode,也可以通过配置文件中的参数 supportVnodes 控制。
正常调大 taos.cfg 中 supportVnodes 参数即可。
### 21 【查询】在服务器上使用 taos-CLI 能查到指定时间段的数据,但在客户端机器上查不到?
这种情况是因为客户端与服务器上设置的时区不一致导致的,调整客户端与服务器的时区一致即可解决。
### 22 【表名】表名确认是存在的,但写入或查询时报表不存在错误,非常奇怪,什么原因?
TDengine 中的所有名称,包括数据库名、表名等都是区分大小写的,如果这些名称在程序或 taos-CLI 中没有使用反引号(`)括起来使用,即使你输入的是大写的,引擎也会转化成小写来使用,如果名称前后加上了反引号,引擎就不会再转化成小写,会保持原样来使用。
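下面用一个简单示例说明(表名仅作演示):

```sql
-- 建表时用反引号包住,保留原始大小写
CREATE TABLE `DeviceTable` (ts TIMESTAMP, val INT);
-- 查询时同样需要加反引号;若写成 DeviceTable(不加反引号),会被当作小写 devicetable,从而报表不存在
SELECT * FROM `DeviceTable`;
```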
......@@ -10,6 +10,10 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do
import Release from "/components/ReleaseV3";
## 3.0.4.2
<Release type="tdengine" version="3.0.4.2" />
## 3.0.4.1
<Release type="tdengine" version="3.0.4.1" />
......
......@@ -14,10 +14,6 @@ import Release from "/components/ReleaseV3";
<Release type="tools" version="2.5.0" />
## 2.5.0
<Release type="tools" version="2.5.0" />
## 2.4.12
<Release type="tools" version="2.4.12" />
......
......@@ -73,7 +73,7 @@ static int32_t init_env() {
taos_free_result(pRes);
// create database
pRes = taos_query(pConn, "create database tmqdb precision 'ns'");
pRes = taos_query(pConn, "create database tmqdb precision 'ns' WAL_RETENTION_PERIOD 3600");
if (taos_errno(pRes) != 0) {
printf("error in create tmqdb, reason:%s\n", taos_errstr(pRes));
goto END;
......@@ -289,7 +289,7 @@ void consume_repeatly(tmq_t* tmq) {
}
}
free(pAssign);
tmq_free_assignment(pAssign);
// let's do it again
basic_consume_loop(tmq);
......
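上面的片段来自订阅示例中的 consume_repeatly():先用 tmq_get_topic_assignment 获取各 vgroup 的分配信息,再对每个 vgroup 调用 tmq_offset_seek 回退到起始位点后重新消费;返回的 assignment 数组需要用新增的 tmq_free_assignment 释放,而不能直接 free。下面给出该流程的一个最小示意(错误处理从简,basic_consume_loop 为原示例中的消费循环):
```c
// 最小示意:将订阅回退到各 vgroup 的起始位点后重新消费
#include <stdio.h>
#include "taos.h"

// 原示例中的消费循环
extern void basic_consume_loop(tmq_t *tmq);

static void seek_to_begin_and_reconsume(tmq_t *tmq, const char *topic_name) {
  tmq_topic_assignment *pAssign = NULL;
  int32_t numOfAssignment = 0;

  int32_t code = tmq_get_topic_assignment(tmq, topic_name, &pAssign, &numOfAssignment);
  if (code != 0) {
    fprintf(stderr, "failed to get assignment, reason:%s\n", tmq_err2str(code));
    return;
  }

  for (int32_t i = 0; i < numOfAssignment; ++i) {
    // 将该 vgroup 的消费位点回退到 begin
    code = tmq_offset_seek(tmq, topic_name, pAssign[i].vgId, pAssign[i].begin);
    if (code != 0) {
      fprintf(stderr, "failed to seek vgId:%d, reason:%s\n", pAssign[i].vgId, tmq_err2str(code));
    }
  }

  // assignment 数组由连接器分配,须用 tmq_free_assignment 释放
  tmq_free_assignment(pAssign);

  // 重新进入消费循环
  basic_consume_loop(tmq);
}
```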
......@@ -162,6 +162,7 @@ static int l_query(lua_State *L){
case TSDB_DATA_TYPE_JSON:
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_GEOMETRY:
//printf("type:%d, max len:%d, current len:%d\n",fields[i].type, fields[i].bytes, length[i]);
lua_pushlstring(L,(char *)row[i], length[i]);
break;
......
......@@ -161,6 +161,7 @@ static int l_query(lua_State *L){
case TSDB_DATA_TYPE_JSON:
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_GEOMETRY:
//printf("type:%d, max len:%d, current len:%d\n",fields[i].type, fields[i].bytes, length[i]);
lua_pushlstring(L,(char *)row[i], length[i]);
break;
......
......@@ -51,7 +51,8 @@ typedef void TAOS_SUB;
#define TSDB_DATA_TYPE_BLOB 18 // binary
#define TSDB_DATA_TYPE_MEDIUMBLOB 19
#define TSDB_DATA_TYPE_BINARY TSDB_DATA_TYPE_VARCHAR // string
#define TSDB_DATA_TYPE_MAX 20
#define TSDB_DATA_TYPE_GEOMETRY 20 // geometry
#define TSDB_DATA_TYPE_MAX 21
typedef enum {
TSDB_OPTION_LOCALE,
......@@ -288,6 +289,7 @@ DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg);
DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);
DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,
int32_t *numOfAssignment);
DLL_EXPORT void tmq_free_assignment(tmq_topic_assignment* pAssignment);
DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
/* ----------------------TMQ CONFIGURATION INTERFACE---------------------- */
......@@ -310,6 +312,7 @@ DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_comm
DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res);
DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res);
/* ------------------------------ TAOSX -----------------------------------*/
// note: following apis are unstable
......
......@@ -82,7 +82,7 @@ typedef struct STuplePos {
int32_t pageId;
int32_t offset;
};
STupleKey streamTupleKey;
SWinKey streamTupleKey;
};
} STuplePos;
......@@ -208,11 +208,6 @@ typedef struct SSDataBlock {
SDataBlockInfo info;
} SSDataBlock;
enum {
FETCH_TYPE__DATA = 0,
FETCH_TYPE__NONE,
};
typedef struct SVarColAttr {
int32_t* offset; // start position for each entry in the list
uint32_t length; // used buffer size that contain the valid data
......
......@@ -215,7 +215,7 @@ void colInfoDataCleanup(SColumnInfoData* pColumn, uint32_t numOfRows);
void blockDataCleanup(SSDataBlock* pDataBlock);
void blockDataEmpty(SSDataBlock* pDataBlock);
size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize);
size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize, int32_t extraSize);
int32_t blockDataTrimFirstRows(SSDataBlock* pBlock, size_t n);
int32_t blockDataKeepFirstNRows(SSDataBlock* pBlock, size_t n);
......
......@@ -124,6 +124,7 @@ extern int32_t tsRedirectFactor;
extern int32_t tsRedirectMaxPeriod;
extern int32_t tsMaxRetryWaitTime;
extern bool tsUseAdapter;
extern int32_t tsMetaCacheMaxSize;
extern int32_t tsSlowLogThreshold;
extern int32_t tsSlowLogScope;
......@@ -193,7 +194,7 @@ struct SConfig *taosGetCfg();
void taosSetAllDebugFlag(int32_t flag, bool rewrite);
void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal, bool rewrite);
int32_t taosSetCfg(SConfig *pCfg, char *name);
int32_t taosApplyLocalCfg(SConfig *pCfg, char *name);
void taosLocalCfgForbiddenToChange(char *name, bool *forbidden);
#ifdef __cplusplus
......
......@@ -952,6 +952,9 @@ int32_t tSerializeSVDropTtlTableReq(void* buf, int32_t bufLen, SVDropTtlTableReq
int32_t tDeserializeSVDropTtlTableReq(void* buf, int32_t bufLen, SVDropTtlTableReq* pReq);
typedef struct {
char db[TSDB_DB_FNAME_LEN];
int64_t dbId;
int32_t cfgVersion;
int32_t numOfVgroups;
int32_t numOfStables;
int32_t buffer;
......@@ -984,8 +987,13 @@ typedef struct {
int16_t sstTrigger;
} SDbCfgRsp;
typedef SDbCfgRsp SDbCfgInfo;
int32_t tSerializeSDbCfgRspImpl(SEncoder *encoder, const SDbCfgRsp *pRsp);
int32_t tSerializeSDbCfgRsp(void* buf, int32_t bufLen, const SDbCfgRsp* pRsp);
int32_t tDeserializeSDbCfgRsp(void* buf, int32_t bufLen, SDbCfgRsp* pRsp);
int32_t tDeserializeSDbCfgRspImpl(SDecoder* decoder, SDbCfgRsp *pRsp);
void tFreeSDbCfgRsp(SDbCfgRsp *pRsp);
typedef struct {
int32_t rowNum;
......@@ -1042,12 +1050,17 @@ int32_t tDeserializeSDnodeListRsp(void* buf, int32_t bufLen, SDnodeListRsp* pRsp
void tFreeSDnodeListRsp(SDnodeListRsp* pRsp);
typedef struct {
SArray* pArray; // Array of SUseDbRsp
} SUseDbBatchRsp;
SUseDbRsp *useDbRsp;
SDbCfgRsp *cfgRsp;
} SDbHbRsp;
typedef struct {
SArray* pArray; // Array of SDbHbRsp
} SDbHbBatchRsp;
int32_t tSerializeSUseDbBatchRsp(void* buf, int32_t bufLen, SUseDbBatchRsp* pRsp);
int32_t tDeserializeSUseDbBatchRsp(void* buf, int32_t bufLen, SUseDbBatchRsp* pRsp);
void tFreeSUseDbBatchRsp(SUseDbBatchRsp* pRsp);
int32_t tSerializeSDbHbBatchRsp(void* buf, int32_t bufLen, SDbHbBatchRsp* pRsp);
int32_t tDeserializeSDbHbBatchRsp(void* buf, int32_t bufLen, SDbHbBatchRsp* pRsp);
void tFreeSDbHbBatchRsp(SDbHbBatchRsp* pRsp);
typedef struct {
SArray* pArray; // Array of SGetUserAuthRsp
......@@ -1629,6 +1642,7 @@ typedef struct {
char fqdn[TSDB_FQDN_LEN];
int32_t port;
int8_t force;
int8_t unsafe;
} SDropDnodeReq;
int32_t tSerializeSDropDnodeReq(void* buf, int32_t bufLen, SDropDnodeReq* pReq);
......@@ -3177,7 +3191,8 @@ typedef struct {
char dbFName[TSDB_DB_FNAME_LEN];
uint64_t suid;
int32_t version;
SArray* pIndex;
int32_t indexSize;
SArray* pIndex; // STableIndexInfo
} STableIndexRsp;
int32_t tSerializeSTableIndexRsp(void* buf, int32_t bufLen, const STableIndexRsp* pRsp);
......
......@@ -269,7 +269,7 @@ typedef struct {
(IS_NUMERIC_TYPE(_t) || (_t) == (TSDB_DATA_TYPE_BOOL) || (_t) == (TSDB_DATA_TYPE_TIMESTAMP))
#define IS_VAR_DATA_TYPE(t) \
(((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON))
(((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON) || ((t) == TSDB_DATA_TYPE_GEOMETRY))
#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR))
#define IS_VALID_TINYINT(_t) ((_t) >= INT8_MIN && (_t) <= INT8_MAX)
......@@ -316,6 +316,8 @@ static FORCE_INLINE bool isNull(const void *val, int32_t type) {
return *(uint32_t *)val == TSDB_DATA_UINT_NULL;
case TSDB_DATA_TYPE_UBIGINT:
return *(uint64_t *)val == TSDB_DATA_UBIGINT_NULL;
case TSDB_DATA_TYPE_GEOMETRY:
return varDataLen(val) == sizeof(int8_t) && *(uint8_t *)varDataVal(val) == TSDB_DATA_GEOMETRY_NULL;
default:
return false;
......
......@@ -129,13 +129,14 @@ typedef struct SSTableVersion {
int32_t smaVer;
} SSTableVersion;
typedef struct SDbVgVersion {
typedef struct SDbCacheInfo {
char dbFName[TSDB_DB_FNAME_LEN];
int64_t dbId;
int32_t vgVersion;
int32_t cfgVersion;
int32_t numOfTable; // unit is TSDB_TABLE_NUM_UNIT
int64_t stateTs;
} SDbVgVersion;
} SDbCacheInfo;
typedef struct STbSVersion {
char* tbFName;
......@@ -148,7 +149,6 @@ typedef struct SUserAuthVersion {
int32_t version;
} SUserAuthVersion;
typedef SDbCfgRsp SDbCfgInfo;
typedef SUserIndexRsp SIndexInfo;
typedef void (*catalogCallback)(SMetaData* pResult, void* param, int32_t code);
......@@ -180,6 +180,8 @@ int32_t catalogGetDBVgInfo(SCatalog* pCtg, SRequestConnInfo* pConn, const char*
int32_t catalogUpdateDBVgInfo(SCatalog* pCatalog, const char* dbName, uint64_t dbId, SDBVgInfo* dbInfo);
int32_t catalogUpdateDbCfg(SCatalog* pCtg, const char* dbFName, uint64_t dbId, SDbCfgInfo* cfgInfo);
int32_t catalogRemoveDB(SCatalog* pCatalog, const char* dbName, uint64_t dbId);
int32_t catalogRemoveTableMeta(SCatalog* pCtg, SName* pTableName);
......@@ -212,7 +214,7 @@ int32_t catalogGetSTableMeta(SCatalog* pCatalog, SRequestConnInfo* pConn, const
int32_t catalogUpdateTableMeta(SCatalog* pCatalog, STableMetaRsp* rspMsg);
int32_t catalogUpdateTableMeta(SCatalog* pCatalog, STableMetaRsp* rspMsg);
int32_t catalogAsyncUpdateTableMeta(SCatalog* pCtg, STableMetaRsp* pMsg);
int32_t catalogGetCachedTableMeta(SCatalog* pCtg, const SName* pTableName, STableMeta** pTableMeta);
......@@ -304,7 +306,7 @@ int32_t catalogGetDnodeList(SCatalog* pCatalog, SRequestConnInfo* pConn, SArray*
int32_t catalogGetExpiredSTables(SCatalog* pCatalog, SSTableVersion** stables, uint32_t* num);
int32_t catalogGetExpiredDBs(SCatalog* pCatalog, SDbVgVersion** dbs, uint32_t* num);
int32_t catalogGetExpiredDBs(SCatalog* pCatalog, SDbCacheInfo** dbs, uint32_t* num);
int32_t catalogGetExpiredUsers(SCatalog* pCtg, SUserAuthVersion** users, uint32_t* num);
......
......@@ -190,8 +190,6 @@ STimeWindow getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int
SArray* qGetQueriedTableListInfo(qTaskInfo_t tinfo);
void verifyOffset(void *pWalReader, STqOffsetVal* pOffset);
int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subType);
void qStreamSetOpen(qTaskInfo_t tinfo);
......
......@@ -158,6 +158,17 @@ typedef enum EFunctionType {
FUNCTION_TYPE_STDDEV_PARTIAL,
FUNCTION_TYPE_STDDEV_MERGE,
// geometry functions
FUNCTION_TYPE_GEOM_FROM_TEXT = 4250,
FUNCTION_TYPE_AS_TEXT,
FUNCTION_TYPE_MAKE_POINT,
FUNCTION_TYPE_INTERSECTS,
FUNCTION_TYPE_EQUALS,
FUNCTION_TYPE_TOUCHES,
FUNCTION_TYPE_COVERS,
FUNCTION_TYPE_CONTAINS,
FUNCTION_TYPE_CONTAINS_PROPERLY,
// user defined funcion
FUNCTION_TYPE_UDF = 10000
} EFunctionType;
......@@ -230,6 +241,7 @@ typedef enum EFuncDataRequired {
FUNC_DATA_REQUIRED_SMA_LOAD,
FUNC_DATA_REQUIRED_NOT_LOAD,
FUNC_DATA_REQUIRED_FILTEROUT,
FUNC_DATA_REQUIRED_ALL_FILTEROUT,
} EFuncDataRequired;
EFuncDataRequired fmFuncDataRequired(SFunctionNode* pFunc, STimeWindow* pTimeWindow);
......
......@@ -109,7 +109,7 @@ typedef uint16_t VarDataLenT; // maxVarDataLen: 65535
#define varDataLenByData(v) (*(VarDataLenT *)(((char *)(v)) - VARSTR_HEADER_SIZE))
#define varDataSetLen(v, _len) (((VarDataLenT *)(v))[0] = (VarDataLenT)(_len))
#define IS_VAR_DATA_TYPE(t) \
(((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON))
(((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON) || ((t) == TSDB_DATA_TYPE_GEOMETRY))
#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR))
static FORCE_INLINE char *udfColDataGetData(const SUdfColumn *pColumn, int32_t row) {
......
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_GEOM_FUNC_H
#define TDENGINE_GEOM_FUNC_H
#ifdef __cplusplus
extern "C" {
#endif
#include "function.h"
int32_t makePointFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t geomFromTextFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t asTextFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t intersectsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t equalsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t touchesFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t coversFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t containsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t containsProperlyFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
#ifdef __cplusplus
}
#endif
#endif // TDENGINE_GEOM_FUNC_H
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_GEOS_WRAPPER_H
#define TDENGINE_GEOS_WRAPPER_H
#ifdef __cplusplus
extern "C" {
#endif
#include <stdint.h>
#include "os.h"
#include "tgeosctx.h"
void geosFreeBuffer(void *buffer);
int32_t initCtxMakePoint();
int32_t doMakePoint(double x, double y, unsigned char **outputGeom, size_t *size);
int32_t initCtxGeomFromText();
int32_t doGeomFromText(const char *inputWKT, unsigned char **outputGeom, size_t *size);
int32_t initCtxAsText();
int32_t doAsText(const unsigned char *inputGeom, size_t size, char **outputWKT);
int32_t initCtxRelationFunc();
int32_t doIntersects(const GEOSGeometry *geom1, const GEOSPreparedGeometry *preparedGeom1, const GEOSGeometry *geom2,
bool swapped, char *res);
int32_t doEquals(const GEOSGeometry *geom1, const GEOSPreparedGeometry *preparedGeom1, const GEOSGeometry *geom2,
bool swapped, char *res);
int32_t doTouches(const GEOSGeometry *geom1, const GEOSPreparedGeometry *preparedGeom1, const GEOSGeometry *geom2,
bool swapped, char *res);
int32_t doCovers(const GEOSGeometry *geom1, const GEOSPreparedGeometry *preparedGeom1, const GEOSGeometry *geom2,
bool swapped, char *res);
int32_t doContains(const GEOSGeometry *geom1, const GEOSPreparedGeometry *preparedGeom1, const GEOSGeometry *geom2,
bool swapped, char *res);
int32_t doContainsProperly(const GEOSGeometry *geom1, const GEOSPreparedGeometry *preparedGeom1, const GEOSGeometry *geom2,
bool swapped, char *res);
int32_t readGeometry(const unsigned char *input, GEOSGeometry **outputGeom, const GEOSPreparedGeometry **outputPreparedGeom);
void destroyGeometry(GEOSGeometry **geom, const GEOSPreparedGeometry **preparedGeom);
#ifdef __cplusplus
}
#endif
#endif /*TDENGINE_GEOS_WRAPPER_H*/
......@@ -249,6 +249,7 @@ typedef struct SDropDnodeStmt {
char fqdn[TSDB_FQDN_LEN];
int32_t port;
bool force;
bool unsafe;
} SDropDnodeStmt;
typedef struct SAlterDnodeStmt {
......
......@@ -313,6 +313,7 @@ void nodesDestroyAllocator(int64_t allocatorId);
SNode* nodesMakeNode(ENodeType type);
void nodesDestroyNode(SNode* pNode);
void nodesFree(void* p);
SNodeList* nodesMakeList();
int32_t nodesListAppend(SNodeList* pList, SNode* pNode);
......
......@@ -90,28 +90,23 @@ typedef struct STbVerInfo {
int32_t tversion;
} STbVerInfo;
/*
* ASSERT(sizeof(SCTableMeta) == 24)
* ASSERT(tableType == TSDB_CHILD_TABLE)
* The cached child table meta info. For each child table, 24 bytes are required to keep the essential table info.
*/
#pragma pack(push, 1)
typedef struct SCTableMeta {
int32_t vgId : 24;
int8_t tableType;
uint64_t uid;
uint64_t suid;
int32_t vgId;
int8_t tableType;
} SCTableMeta;
#pragma pack(pop)
/*
* Note that the first 24 bytes of STableMeta are identical to SCTableMeta, it is safe to cast a STableMeta to be a
* SCTableMeta.
*/
#pragma pack(push, 1)
typedef struct STableMeta {
// BEGIN: KEEP THIS PART SAME WITH SCTableMeta
int32_t vgId : 24;
int8_t tableType;
uint64_t uid;
uint64_t suid;
int32_t vgId;
int8_t tableType;
// END: KEEP THIS PART SAME WITH SCTableMeta
// if the table is TSDB_CHILD_TABLE, the following information is acquired from the corresponding super table meta
......@@ -121,6 +116,7 @@ typedef struct STableMeta {
STableComInfo tableInfo;
SSchema schema[];
} STableMeta;
#pragma pack(pop)
typedef struct SDBVgInfo {
int32_t vgVersion;
......@@ -130,7 +126,7 @@ typedef struct SDBVgInfo {
int32_t numOfTable; // DB's table num, unit is TSDB_TABLE_NUM_UNIT
int64_t stateTs;
SHashObj* vgHash; // key:vgId, value:SVgroupInfo
SArray* vgArray;
SArray* vgArray; // SVgroupInfo
} SDBVgInfo;
typedef struct SUseDbOutput {
......@@ -260,6 +256,7 @@ int32_t cloneTableMeta(STableMeta* pSrc, STableMeta** pDst);
int32_t cloneDbVgInfo(SDBVgInfo* pSrc, SDBVgInfo** pDst);
int32_t cloneSVreateTbReq(SVCreateTbReq* pSrc, SVCreateTbReq** pDst);
void freeVgInfo(SDBVgInfo* vgInfo);
void freeDbCfgInfo(SDbCfgInfo *pInfo);
extern int32_t (*queryBuildMsg[TDMT_MAX])(void* input, char** msg, int32_t msgSize, int32_t* msgLen,
void* (*mallocFp)(int64_t));
......
......@@ -20,13 +20,13 @@
#include "tsimplehash.h"
#include "tstreamFileState.h"
#ifndef _STREAM_STATE_H_
#define _STREAM_STATE_H_
#ifdef __cplusplus
extern "C" {
#endif
#ifndef _STREAM_STATE_H_
#define _STREAM_STATE_H_
// void* streamBackendInit(const char* path);
// void streamBackendCleanup(void* arg);
// SListNode* streamBackendAddCompare(void* backend, void* arg);
......@@ -86,9 +86,8 @@ typedef struct {
int64_t number;
} SStreamStateCur;
int32_t streamStateFuncPut(SStreamState* pState, const STupleKey* key, const void* value, int32_t vLen);
int32_t streamStateFuncGet(SStreamState* pState, const STupleKey* key, void** pVal, int32_t* pVLen);
int32_t streamStateFuncDel(SStreamState* pState, const STupleKey* key);
int32_t streamStateFuncPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
int32_t streamStateFuncGet(SStreamState* pState, const SWinKey* key, void** ppVal, int32_t* pVLen);
int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
int32_t streamStateGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
......