Commit 1f5058ae authored by Y yihaoDeng

[TD-3178]<feature> merge develop and TD-4556

......@@ -131,10 +131,10 @@ cmake .. -DCPUTYPE=mips64 && cmake --build .
### On Windows platform
If you use Visual Studio 2013, please open a command window by executing "cmd.exe".
-Please specify "x86_amd64" for 64-bit Windows or "x86" for 32-bit Windows when you execute vcvarsall.bat.
+Please specify "amd64" for 64-bit Windows or "x86" for 32-bit Windows when you execute vcvarsall.bat.
```cmd
mkdir debug && cd debug
"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat" < x86_amd64 | x86 >
"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat" < amd64 | x86 >
cmake .. -G "NMake Makefiles"
nmake
```
......
......@@ -122,10 +122,14 @@ IF (TD_LINUX)
ADD_DEFINITIONS(-D_TD_NINGSI_60)
MESSAGE(STATUS "set ningsi macro to true")
ENDIF ()
SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG")
IF (TD_MEMORY_SANITIZER)
SET(DEBUG_FLAGS "-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment -static-libasan -O0 -g3 -DDEBUG")
ELSE ()
SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG")
ENDIF ()
SET(RELEASE_FLAGS "-O3 -Wno-error")
IF (${COVER} MATCHES "true")
MESSAGE(STATUS "Test coverage mode, add extra flags")
SET(GCC_COVERAGE_COMPILE_FLAGS "-fprofile-arcs -ftest-coverage")
......@@ -144,7 +148,11 @@ IF (TD_DARWIN_64)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "darwin64 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -Wno-missing-braces -fPIC -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG")
IF (TD_MEMORY_SANITIZER)
SET(DEBUG_FLAGS "-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment -O0 -g3 -DDEBUG")
ELSE ()
SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG")
ENDIF ()
SET(RELEASE_FLAGS "-Og")
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/lz4/inc)
......@@ -162,7 +170,14 @@ IF (TD_WINDOWS)
IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
ENDIF ()
SET(DEBUG_FLAGS "/fsanitize=thread /fsanitize=leak /fsanitize=memory /fsanitize=undefined /fsanitize=hwaddress /Zi /W3 /GL")
IF (TD_MEMORY_SANITIZER)
MESSAGE("memory sanitizer detected as true")
SET(DEBUG_FLAGS "/fsanitize=address /Zi /W3 /GL")
ELSE ()
MESSAGE("memory sanitizer detected as false")
SET(DEBUG_FLAGS "/Zi /W3 /GL")
ENDIF ()
SET(RELEASE_FLAGS "/W0 /O3 /GL")
ENDIF ()
......@@ -171,7 +186,7 @@ IF (TD_WINDOWS)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/regex)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/wepoll/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/MsvcLibX/include)
ENDIF ()
ENDIF ()
IF (TD_WINDOWS_64)
ADD_DEFINITIONS(-D_M_X64)
......
......@@ -83,3 +83,8 @@ SET(TD_BUILD_JDBC TRUE)
IF (${BUILD_JDBC} MATCHES "false")
SET(TD_BUILD_JDBC FALSE)
ENDIF ()
+SET(TD_MEMORY_SANITIZER FALSE)
+IF (${MEMORY_SANITIZER} MATCHES "true")
+SET(TD_MEMORY_SANITIZER TRUE)
+ENDIF ()
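A minimal usage sketch of the new switch, assuming the out-of-source build flow shown earlier: passing `MEMORY_SANITIZER=true` on the cmake command line satisfies the `MATCHES "true"` check above.

```bash
mkdir debug && cd debug
cmake .. -DMEMORY_SANITIZER=true && cmake --build .
```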
......@@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)
-INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.29.jar DESTINATION connector/jdbc)
+INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.30.jar DESTINATION connector/jdbc)
ENDIF ()
ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
......
......@@ -15,6 +15,7 @@ TDengine is an efficient platform for storing, querying, and analyzing time-series big data
* [Command Line Tool TAOS](/getting-started#console): an easy way to access TDengine
* [Experience Lightning Speed](/getting-started#demo): run the demo program to quickly experience efficient data insertion and queries
* [List of Supported Platforms](/getting-started#platforms): platforms supported by the TDengine server and client
+* [Kubernetes Deployment](https://taosdata.github.io/TDengine-Operator/zh/index.html): detailed instructions for deploying TDengine in a Kubernetes environment
## [Overall Architecture](/architecture)
......
......@@ -176,7 +176,7 @@ The logical structure of TDengine's distributed architecture is shown below:
**Communication:** Communication among the data nodes of a TDengine system, and between application drivers and the data nodes, runs over TCP/UDP. With IoT scenarios in mind, where data-write packets are generally small, TDengine uses UDP in addition to TCP, since UDP is more efficient and not limited by the number of connections. TDengine implements its own timeout, retransmission, and acknowledgement mechanisms to make UDP transfer reliable. Packets under 15 KB are transmitted over UDP; packets over 15 KB, as well as query-type operations, automatically use TCP. In addition, based on the configuration and the packet, TDengine automatically compresses/decompresses data and applies digital signing/authentication. Data replication between data nodes uses TCP only.
-**FQDN configuration:** A data node has one or more FQDNs, which can be specified with the parameter "fqdn" in the system configuration file taos.cfg; if not specified, the system automatically uses the computer's hostname as its FQDN. If a node has no FQDN configured, you can simply set its fqdn parameter to its IP address. However, using an IP address is not recommended, because IP addresses may change, and once they do the cluster stops working properly. The EP (End Point) of a data node consists of FQDN + Port. When using FQDNs, make sure DNS works properly, or configure the hosts file on the node and on the machines where applications run.
+**FQDN configuration:** A data node has one or more FQDNs, which can be specified with the parameter "fqdn" in the system configuration file taos.cfg; if not specified, the system automatically uses the computer's hostname as its FQDN. If a node has no FQDN configured, you can simply set its fqdn parameter to its IP address. However, using an IP address is not recommended, because IP addresses may change, and once they do the cluster stops working properly. The EP (End Point) of a data node consists of FQDN + Port. When using FQDNs, make sure DNS works properly, or configure the hosts file on the node and on the machines where applications run. In addition, the value of this parameter must be kept within 96 characters.
**Port configuration:** The external port of a data node is determined by the TDengine system configuration parameter serverPort; the port for intra-cluster communication is serverPort+5. Data replication between data nodes in the cluster occupies one more TCP port, serverPort+10. To support efficient multi-threaded handling of UDP data, each internal and external UDP connection occupies 5 consecutive ports. A data node's total port range is therefore serverPort to serverPort+10, i.e. 11 TCP/UDP ports in all. (There may also be ports used by RESTful and Arbitrator, making 13 in total.) Make sure the firewall keeps these ports open. Each data node can be configured with a different serverPort. (For details, see [TDengine 2.0 Port Description](https://www.taosdata.com/cn/documentation/faq#port).)
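A minimal taos.cfg sketch of the two settings above (the host name is illustrative; taos.cfg uses space-separated key/value pairs and `#` comments):

```
# taos.cfg (sketch)
fqdn       h1.taosdata.com   # must resolve via DNS or the hosts file; at most 96 characters
serverPort 6030              # intra-cluster port is serverPort+5; replication uses serverPort+10
```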
......
......@@ -325,10 +325,12 @@ for (int i = 0; i < numOfRows; i++){
}
s.setString(2, s2, 10);
-// After AddBatch, you can set a new table name, TAGS, and VALUES, so that one execution can write into multiple tables
+// After AddBatch, the cache is not cleared. To avoid confusion, binding another batch of data before ExecuteBatch is not recommended
s.columnDataAddBatch();
-// Execute the statement:
+// Execute the statement with the bound data:
s.columnDataExecuteBatch();
+// Clear the cache after execution. Once cleared, the current object can be reused to bind a new batch of data (with a new table name, new TAGS values, and new VALUES values):
+s.columnDataClearBatch();
// When finished, release the resources:
s.columnDataCloseBatch();
```
......@@ -361,6 +363,7 @@ public void setShort(int columnIndex, ArrayList<Short> list) throws SQLException
public void setString(int columnIndex, ArrayList<String> list, int size) throws SQLException
public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
```
+Note that both setString and setNString require the user to declare, in the size parameter, the column width of the corresponding column in the table definition.
### <a class="anchor" id="subscribe"></a>Subscription
......
......@@ -301,12 +301,14 @@ All of TDengine's asynchronous APIs use a non-blocking calling mode. Applications can use multiple threads
2. Call `taos_stmt_prepare` to parse the INSERT statement;
3. If the INSERT statement reserves a placeholder for the table name but not for the TAGS, call `taos_stmt_set_tbname` to set the table name;
4. If the INSERT statement reserves placeholders for both the table name and the TAGS (for example, an INSERT statement that relies on automatic table creation), call `taos_stmt_set_tbname_tags` to set the table name and the TAGS values;
-5. Call `taos_stmt_bind_param_batch` to set the VALUES in a multi-column manner;
+5. Call `taos_stmt_bind_param_batch` to set the VALUES in a multi-column manner, or call `taos_stmt_bind_param` to set the VALUES row by row;
6. Call `taos_stmt_add_batch` to add the currently bound parameters to the batch;
7. Repeat steps 3 to 6 to add more data rows to the batch;
8. Call `taos_stmt_execute` to execute the prepared batch command;
9. When finished, call `taos_stmt_close` to release all resources.
+Note: if `taos_stmt_execute` succeeds and the SQL statement does not need to change, the parsing result of `taos_stmt_prepare` can be reused, going straight back to steps 3–6 to bind new data. If execution fails, however, it is not recommended to keep working in the current context; release the resources and start over from `taos_stmt_init`.
+Besides C/C++, TDengine's Java JNI connector also supports the parameter binding interface; see [Parameter binding in Java](https://www.taosdata.com/cn/documentation/connector/java#stmt-java).
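A compressed C sketch of the steps above, binding one timestamp column and one INT column via `taos_stmt_bind_param_batch` (error handling abbreviated; the table name `d1001` and its schema are illustrative assumptions):

```c
#include <stdio.h>
#include <string.h>
#include <taos.h>

// Assumes `taos` is a connected TAOS* handle and table d1001 (ts TIMESTAMP, v INT) exists.
void stmt_insert_sketch(TAOS *taos) {
  TAOS_STMT *stmt = taos_stmt_init(taos);                   // step 1
  const char *sql = "INSERT INTO ? VALUES (?, ?)";
  taos_stmt_prepare(stmt, sql, strlen(sql));                // step 2: parse the INSERT
  taos_stmt_set_tbname(stmt, "d1001");                      // step 3: table name only, no TAGS

  int64_t ts[2]     = {1626861392589, 1626861392590};
  int32_t v[2]      = {10, 11};
  int32_t len_ts[2] = {sizeof(int64_t), sizeof(int64_t)};
  int32_t len_v[2]  = {sizeof(int32_t), sizeof(int32_t)};

  TAOS_MULTI_BIND params[2];
  memset(params, 0, sizeof(params));                        // is_null stays NULL: no NULL values
  params[0].buffer_type   = TSDB_DATA_TYPE_TIMESTAMP;
  params[0].buffer        = ts;
  params[0].buffer_length = sizeof(int64_t);
  params[0].length        = len_ts;
  params[0].num           = 2;                              // two rows in this batch
  params[1].buffer_type   = TSDB_DATA_TYPE_INT;
  params[1].buffer        = v;
  params[1].buffer_length = sizeof(int32_t);
  params[1].length        = len_v;
  params[1].num           = 2;

  taos_stmt_bind_param_batch(stmt, params);                 // step 5: multi-column binding
  taos_stmt_add_batch(stmt);                                // step 6: add to the batch
  if (taos_stmt_execute(stmt) != 0) {                       // step 8: run the batch
    printf("execute failed: %s\n", taos_stmt_errstr(stmt));
  }
  taos_stmt_close(stmt);                                    // step 9: release resources
}
```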
The specific functions of this interface are as follows (you can also refer to [apitest.c](https://github.com/taosdata/TDengine/blob/develop/tests/examples/c/apitest.c) for how they are used):
......@@ -338,17 +340,17 @@ typedef struct TAOS_BIND {
- `int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name)`
-(Added in version 2.1.1.0)
+(Added in version 2.1.1.0; only supported for replacing parameter values in INSERT statements)
When the table name in the SQL statement uses a `?` placeholder, this function can be used to bind a concrete table name.
- `int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags)`
-(Added in version 2.1.2.0)
+(Added in version 2.1.2.0; only supported for replacing parameter values in INSERT statements)
When both the table name and the TAGS in the SQL statement use `?` placeholders, this function can be used to bind a concrete table name and concrete TAGS values. The most typical use case is an INSERT statement that relies on automatic table creation (the current version does not support specifying individual TAGS columns). The number of columns in the tags parameter must exactly match the number of TAGS required by the SQL statement.
- `int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind)`
-(Added in version 2.1.1.0)
+(Added in version 2.1.1.0; only supported for replacing parameter values in INSERT statements)
Passes the data to be bound in a multi-column manner. The order and number of the data columns passed here must exactly match the VALUES parameters in the SQL statement. TAOS_MULTI_BIND is defined as follows:
```c
......@@ -378,6 +380,11 @@ typedef struct TAOS_MULTI_BIND {
When finished, releases all resources.
- `char * taos_stmt_errstr(TAOS_STMT *stmt)`
+(Added in version 2.1.3.0)
+Used to obtain the error message when another stmt API returns an error (an error code or a null pointer).
### Continuous Query Interface
TDengine provides time-driven, real-time stream computing APIs. At a specified interval, various real-time aggregation operations can be run over one or more tables (data streams) in a database. Usage is simple: the only APIs are for opening and closing a stream, as follows:
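A minimal C sketch of the open/close pair, assuming a connected `TAOS*` handle and the `d1001` table used elsewhere in these docs (signature as in TDengine 2.x's taos.h; the callback fires once per computed result row):

```c
#include <stdio.h>
#include <taos.h>

// Invoked each time the continuous query produces a result row.
void stream_cb(void *param, TAOS_RES *res, TAOS_ROW row) {
  printf("continuous query produced a new row\n");
}

// Open a stream that sums d1001's current every 10 seconds, then close it.
void stream_sketch(TAOS *taos) {
  void *stream = taos_open_stream(taos,
      "SELECT SUM(current) FROM d1001 INTERVAL(10s)",
      stream_cb,
      0,      // stime: 0 means start computing from now
      NULL,   // param passed through to the callback
      NULL);  // optional callback invoked when the stream is closed
  // ... let the stream run ...
  taos_close_stream(stream);
}
```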
......
......@@ -99,7 +99,7 @@ taosd -C
Only some important configuration parameters are listed below; for more, see the notes in the configuration file. For a detailed description of each parameter and its role, see the preceding chapters. The default values of these parameters all work, and usually nothing needs to be changed. **Note: after a configuration change, the *taosd* service must be restarted for it to take effect.**
- firstEp: the end point of the first dnode in the cluster that taosd actively connects to at startup; default: localhost:6030.
-- fqdn: the FQDN of the data node; defaults to the first hostname configured by the operating system. If you are used to IP-address access, you may set it to the node's IP address.
+- fqdn: the FQDN of the data node; defaults to the first hostname configured by the operating system. If you are used to IP-address access, you may set it to the node's IP address. The value of this parameter must be kept within 96 characters.
- serverPort: the port taosd uses to serve external requests after startup; default: 6030. (The RESTful service uses this port +11, i.e. 6041 by default.)
- dataDir: the data file directory; all data files are written here. Default: /var/lib/taos.
- logDir: the log file directory; client and server run logs are written here. Default: /var/log/taos.
......
......@@ -1306,7 +1306,7 @@ SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), P
- The maximum length of a database name is 32
- The maximum length of a table name is 192; a data row is at most 16k characters long (note: each BINARY/NCHAR column in a data row occupies 2 extra bytes of storage)
- The maximum length of a column name is 64; at most 1024 columns are allowed, with a minimum of 2, and the first column must be a timestamp
-- At most 128 tags are allowed, with a minimum of 1, and the total tag length cannot exceed 16k characters
+- The maximum length of a tag name is 64; at most 128 tags are allowed, with a minimum of 1, and the total length of a table's tag values cannot exceed 16k characters
- An SQL statement is at most 65480 characters long, but this can be changed via the system configuration parameter maxSQLLength, up to 1M
- A SELECT statement may return at most 1024 columns (function calls in the statement may also occupy some column slots); when the limit is exceeded, explicitly specify fewer result columns to avoid execution errors.
- The numbers of databases, super tables, and tables are not limited by the system, only by system resources
......
# TDengine Documentation
TDengine is a highly efficient platform to store, query, and analyze time-series data. It is specially designed and optimized for IoT, Internet of Vehicles, Industrial IoT, IT Infrastructure and Application Monitoring, etc. It works like a relational database, such as MySQL, but you are strongly encouraged to read through the following documentation before you experience it, especially the Data Model and Data Modeling sections. In addition to this document, you should also download and read our technology white paper. For the older TDengine version 1.6 documentation, please click here.
## [TDengine Introduction](/evaluation)
* [TDengine Introduction and Features](/evaluation#intro)
* [TDengine Use Scenarios](/evaluation#scenes)
* [TDengine Performance Metrics and Verification](/evaluation#)
## [Getting Started](/getting-started)
* [Quick Install](/getting-started#install): install via source code, package, or Docker within seconds
- [Easy to Launch](/getting-started#start): start / stop TDengine with systemctl
- [Command-line](/getting-started#console): an easy way to access TDengine server
- [Experience Lightning Speed](/getting-started#demo): running a demo, inserting/querying data to experience faster speed
- [List of Supported Platforms](/getting-started#platforms): a list of platforms supported by TDengine server and client
## [Overall Architecture](/architecture)
- [Data Model](/architecture#model): relational database model, but one table for one device with static tags
- [Cluster and Primary Logical Unit](/architecture#cluster): Take advantage of NoSQL, support scale-out and high-reliability
- [Storage Model and Data Partitioning/Sharding](/architecture#sharding): tag data will be separated from time-series data, segmented by vnode and time
- [Data Writing and Replication Process](/architecture#replication): received records are written to WAL and cached, then an acknowledgement is sent back to the client, with support for multiple replicas
- [Caching and Persistence](/architecture#persistence): latest records are cached in memory, but are written in columnar format with an ultra-high compression ratio
- [Data Query](/architecture#query): support various functions, time-axis aggregation, interpolation, and multi-table aggregation
## [Data Modeling](/model)
- [Create a Database](/model#create-db): create a database for all data collection points with similar features
- [Create a Super Table (STable)](/model#create-stable): create a STable for all data collection points of the same type
- [Create a Table](/model#create-table): use the STable as a template to create a table for each data collection point
## [TAOS SQL](/taos-sql)
- [Data Types](/taos-sql#data-type): support timestamp, int, float, nchar, bool, and other types
- [Database Management](/taos-sql#management): add, drop, check databases
- [Table Management](/taos-sql#table): add, drop, check, alter tables
- [STable Management](/taos-sql#super-table): add, drop, check, alter STables
- [Tag Management](/taos-sql#tags): add, drop, alter tags
- [Inserting Records](/taos-sql#insert): support writing single/multiple records per table and multiple records across tables, as well as writing historical data
- [Data Query](/taos-sql#select): support time segment, value filtering, sorting, manual paging of query results, etc
- [SQL Function](/taos-sql#functions): support various aggregation functions, selection functions, and calculation functions, such as avg, min, diff, etc
- [Time Dimensions Aggregation](/taos-sql#aggregation): aggregate and reduce the dimension after cutting table data by time segment
- [Boundary Restrictions](/taos-sql#limitation): restrictions on databases, tables, SQL, and others
- [Error Code](/taos-sql/error-code): TDengine 2.0 error codes and corresponding decimal codes
## [Efficient Data Ingestion](/insert)
- [SQL Ingestion](/insert#sql): write one or multiple records into one or multiple tables via SQL insert command
- [Prometheus Ingestion](/insert#prometheus): Configure Prometheus to write data directly without any code
- [Telegraf Ingestion](/insert#telegraf): Configure Telegraf to write collected data directly without any code
- [EMQ X Broker](/insert#emq): Configure EMQ X to write MQTT data directly without any code
- [HiveMQ Broker](/insert#hivemq): Configure HiveMQ to write MQTT data directly without any code
## [Efficient Data Querying](/queries)
- [Main Query Features](/queries#queries): support various standard functions, setting filter conditions, and querying per time segment
- [Multi-table Aggregation Query](/queries#aggregation): use STable and set tag filter conditions to perform efficient aggregation queries
- [Downsampling to Query Value](/queries#sampling): aggregate data in successive time windows, support interpolation
## [Advanced Features](/advanced-features)
- [Continuous Query](/advanced-features#continuous-query): Based on sliding windows, the data stream is automatically queried and calculated at regular intervals
- [Data Publisher/Subscriber](/advanced-features#subscribe): subscribe to the newly arrived data like a typical messaging system
- [Cache](/advanced-features#cache): the newly arrived data of each device/table will always be cached
- [Alarm Monitoring](/advanced-features#alert): automatically monitor out-of-threshold data, and actively push it based on configured rules
## [Connector](/connector)
- [C/C++ Connector](/connector#c-cpp): primary method to connect to TDengine server through libtaos client library
- [Java Connector(JDBC)](/connector/java): driver for connecting to the server from Java applications using the JDBC API
- [Python Connector](/connector#python): driver for connecting to TDengine server from Python applications
- [RESTful Connector](/connector#restful): a simple way to interact with TDengine via HTTP
- [Go Connector](/connector#go): driver for connecting to TDengine server from Go applications
- [Node.js Connector](/connector#nodejs): driver for connecting to TDengine server from Node.js applications
- [C# Connector](/connector#csharp): driver for connecting to TDengine server from C# applications
- [Windows Client](https://www.taosdata.com/blog/2019/07/26/514.html): compile your own Windows client, which is required by various connectors on the Windows environment
## [Connections with Other Tools](/connections)
- [Grafana](/connections#grafana): query the data saved in TDengine and provide visualization
- [Matlab](/connections#matlab): access data stored in TDengine server via JDBC configured within Matlab
- [R](/connections#r): access data stored in TDengine server via JDBC configured within R
- [IDEA Database](https://www.taosdata.com/blog/2020/08/27/1767.html): use TDengine visually through IDEA Database Management Tool
## [Installation and Management of TDengine Cluster](/cluster)
- [Preparation](/cluster#prepare): important considerations before deploying TDengine for production usage
- [Create Your First Node](/cluster#node-one): simple to follow the quick setup
- [Create Subsequent Nodes](/cluster#node-other): configure taos.cfg for new nodes to add more to the existing cluster
- [Node Management](/cluster#management): add, delete, and check nodes in the cluster
- [High-availability of Vnode](/cluster#high-availability): implement high-availability of Vnode through multi-replicas
- [Mnode Management](/cluster#mnode): automatic system creation without any manual intervention
- [Load Balancing](/cluster#load-balancing): automatically performed once the number of nodes or load changes
- [Offline Node Processing](/cluster#offline): any node offline for more than a certain period will be removed from the cluster
- [Arbitrator](/cluster#arbitrator): used in the case of an even number of replicas to prevent split-brain
## [TDengine Operation and Maintenance](/administrator)
- [Capacity Planning](/administrator#planning): Estimating hardware resources based on scenarios
- [Fault Tolerance and Disaster Recovery](/administrator#tolerance): set the correct WAL and number of data replicas
- [System Configuration](/administrator#config): port, cache size, file block size, and other system configurations
- [User Management](/administrator#user): add/delete TDengine users, modify user password
- [Import Data](/administrator#import): import data into TDengine from either script or CSV file
- [Export Data](/administrator#export): export data either from TDengine shell or from the taosdump tool
- [System Monitor](/administrator#status): monitor the system connections, queries, streaming calculation, logs, and events
- [File Directory Structure](/administrator#directories): directories where TDengine data files and configuration files are located
- [Parameter Restrictions and Reserved Keywords](/administrator#keywords): TDengine’s list of parameter restrictions and reserved keywords
## TDengine Technical Design
- [System Module](/architecture/taosd): taosd functions and module partitioning
- [Data Replication](/architecture/replica): support real-time synchronous/asynchronous replication, to ensure high-availability of the system
- [Technical Blog](https://www.taosdata.com/cn/blog/?categories=3): More technical analysis and architecture design articles
## Common Tools
- [TDengine sample import tools](https://www.taosdata.com/blog/2020/01/18/1166.html)
- [TDengine performance comparison test tools](https://www.taosdata.com/blog/2020/01/18/1166.html)
- [Use TDengine visually through IDEA Database Management Tool](https://www.taosdata.com/blog/2020/08/27/1767.html)
## Performance: TDengine vs Others
- [Performance: TDengine vs InfluxDB with InfluxDB’s open-source performance testing tool](https://www.taosdata.com/blog/2020/01/13/1105.html)
- [Performance: TDengine vs OpenTSDB](https://www.taosdata.com/blog/2019/08/21/621.html)
- [Performance: TDengine vs Cassandra](https://www.taosdata.com/blog/2019/08/14/573.html)
- [Performance: TDengine vs InfluxDB](https://www.taosdata.com/blog/2019/07/19/419.html)
- [Performance Test Reports of TDengine vs InfluxDB/OpenTSDB/Cassandra/MySQL/ClickHouse](https://www.taosdata.com/downloads/TDengine_Testing_Report_cn.pdf)
## More on IoT Big Data
- [Characteristics of IoT and Industry Internet Big Data](https://www.taosdata.com/blog/2019/07/09/characteristics-of-iot-big-data/)
- [Features and Functions of IoT Big Data platforms](https://www.taosdata.com/blog/2019/07/29/542.html)
- [Why don’t General Big Data Platforms Fit IoT Scenarios?](https://www.taosdata.com/blog/2019/07/09/why-does-the-general-big-data-platform-not-fit-iot-data-processing/)
- [Why TDengine is the best choice for IoT, Internet of Vehicles, and Industry Internet Big Data platforms?](https://www.taosdata.com/blog/2019/07/09/why-tdengine-is-the-best-choice-for-iot-big-data-processing/)
## FAQ
- [FAQ: Common questions and answers](/faq)
\ No newline at end of file
# TDengine Introduction
## <a class="anchor" id="intro"></a> About TDengine
TDengine is an innovative Big Data processing product launched by Taos Data in the face of the fast-growing Internet of Things (IoT) Big Data market and technical challenges. It does not rely on any third-party software, nor does it optimize or package any open-source database or stream computing product. Instead, it is a product independently developed after absorbing the advantages of many traditional relational databases, NoSQL databases, stream computing engines, message queues, and other software. TDengine has its own unique Big Data processing advantages in time-series space.
One of the modules of TDengine is the time-series database. However, in addition to this, to reduce the complexity of research and development and the difficulty of system operation, TDengine also provides functions such as caching, message queuing, subscription, stream computing, etc. TDengine provides a full-stack technical solution for the processing of IoT and Industrial Internet BigData. It is an efficient and easy-to-use IoT Big Data platform. Compared with typical Big Data platforms such as Hadoop, TDengine has the following distinct characteristics:
- **Performance improvement over 10 times**: An innovative data storage structure is defined: each single core can process at least 20,000 requests per second, insert millions of data points, and read more than 10 million data points, which is more than 10 times faster than existing general databases.
- **Reduce the cost of hardware or cloud services to 1/5**: Due to its ultra-performance, TDengine’s computing resources consumption is less than 1/5 of other common Big Data solutions; through columnar storage and advanced compression algorithms, the storage consumption is less than 1/10 of other general databases.
- **Full-stack time-series data processing engine**: Integrate database, message queue, cache, stream computing, and other functions, and the applications do not need to integrate with software such as Kafka/Redis/HBase/Spark/HDFS, thus greatly reducing the complexity cost of application development and maintenance.
- **Powerful analysis functions**: Data from ten years ago or one second ago can all be queried based on a specified time range. Data can be aggregated over a timeline or across multiple devices. Ad-hoc queries can be made at any time through Shell, Python, R, and Matlab.
- **Seamless connection with third-party tools**: Integration with Telegraf, Grafana, EMQ, HiveMQ, Prometheus, Matlab, R, etc. without even one single line of code. OPC, Hadoop, Spark, etc. will be supported in the future, and more BI tools will be seamlessly connected to.
- **Zero operation cost & zero learning cost**: Installing clusters is simple and quick, with real-time backup built in and no need to split libraries or tables. TDengine supports standard SQL as well as RESTful and Python/Java/C/C++/C#/Go/Node.js interfaces, and, being similar to MySQL, comes with zero learning cost.
With TDengine, the total cost of ownership of typical IoT, Internet of Vehicles, and Industrial Internet Big Data platforms can be greatly reduced. However, it should be pointed out that due to making full use of the characteristics of IoT time-series data, TDengine cannot be used to process general data from web crawlers, microblogs, WeChat, e-commerce, ERP, CRM, and other sources.
![TDengine Technology Ecosystem](page://images/eco_system.png)
<center>Figure 1. TDengine Technology Ecosystem</center>
## <a class="anchor" id="scenes"></a>Overall Scenarios of TDengine
As an IoT Big Data platform, the typical application scenarios of TDengine are mainly presented in the IoT category, with users having a certain amount of data. The following sections of this document are mainly aimed at IoT-relevant systems. Other systems, such as CRM, ERP, etc., are beyond the scope of this article.
### Characteristics and Requirements of Data Sources
From the perspective of data sources, designers can assess the applicability of TDengine to target application systems as follows.
| **Data Source Characteristics and Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| -------------------------------------------------------- | ------------------ | ----------------------- | ------------------- | :----------------------------------------------------------- |
| A huge amount of total data | | | √ | TDengine provides excellent scale-out functions in terms of capacity, and has a storage structure matching high compression ratio to achieve the best storage efficiency in the industry. |
| Data input velocity is occasionally or continuously huge | | | √ | TDengine's performance is much higher than other similar products. It can continuously process a large amount of input data in the same hardware environment, and provide a performance evaluation tool that can easily run in the user environment. |
| A huge amount of data sources | | | √ | TDengine is designed to include optimizations specifically for a huge amount of data sources, such as data writing and querying, which is especially suitable for efficiently processing massive (tens of millions or more) data sources. |
### System Architecture Requirements
| **System Architecture Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
| Require a simple and reliable system architecture | | | √ | TDengine's system architecture is very simple and reliable, with its own message queue, cache, stream computing, monitoring and other functions, and no need to integrate any additional third-party products. |
| Require fault-tolerance and high-reliability | | | √ | TDengine has cluster functions to automatically provide high-reliability functions such as fault tolerance and disaster recovery. |
| Standardization specifications | | | √ | TDengine uses standard SQL language to provide main functions and follow standardization specifications. |
### System Function Requirements
| | | | | |
| ----------------------------------------------------- | ------------------ | -------------------- | ------------------- | ------------------------------------------------------------ |
| **System Function Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| Require complete data processing algorithms built-in | | √ | | TDengine implements various general data processing algorithms, but has not properly handled all requirements of different industries, so special types of processing shall be handled at the application level. |
| Require a huge amount of crosstab queries | | √ | | This type of processing should be handled more by relational database systems, or TDengine and relational database systems should fit together to implement system functions. |
### System Performance Requirements
| | | | | |
| -------------------------------------------- | ------------------ | -------------------- | ------------------- | ------------------------------------------------------------ |
| **System Performance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| Require larger total processing capacity | | | √ | TDengine’s cluster functions can easily improve processing capacity through multi-server cooperation. |
| Require high-speed data processing | | | √ | TDengine’s storage and data processing are designed and optimized for IoT, and generally process data several times faster than other similar products. |
| Require fast processing of fine-grained data | | | √ | TDengine has achieved the same level of performance with relational and NoSQL data processing systems. |
### System Maintenance Requirements
| | | | | |
| -------------------------------------------- | ------------------ | -------------------- | ------------------- | ------------------------------------------------------------ |
| **System Maintenance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| Require system with high-reliability | | | √ | TDengine has a very robust and reliable system architecture to implement simple and convenient daily operation with streamlined experiences for operators, thus human errors and accidents are eliminated to the greatest extent. |
| Require controllable operation learning cost | | | √ | As above. |
| Require abundant talent supply | √ | | | As a new-generation product, it is still difficult to find talent with TDengine experience in the market. However, the learning cost is low. As the vendor, we also provide extensive operation training and consulting services. |
\ No newline at end of file
# Quick Start
## <a class="anchor" id="install"></a>Quick Install
TDengine software consists of 3 parts: server, client, and alarm module. At the moment, the TDengine server only runs on Linux (support for Windows, macOS, and more will come soon), but the client can run on either Windows or Linux. Applications based on any OS can connect to the server taosd via a RESTful interface. As for CPUs, TDengine supports X64/ARM64/MIPS64/Alpha64 as well as ARM32 and RISC-V, and more CPU architectures will be supported soon. You can set up and install the TDengine server either from the [source code](https://www.taosdata.com/en/getting-started/#Install-from-Source) or the [packages](https://www.taosdata.com/en/getting-started/#Install-from-Package).
### <a class="anchor" id="source-install"></a>Install from Source
Please visit our [TDengine github page](https://github.com/taosdata/TDengine) for instructions on installation from the source code.
### Install from Docker Container
Please visit our [TDengine Official Docker Image: Distribution, Downloading, and Usage](https://www.taosdata.com/blog/2020/05/13/1509.html).
### <a class="anchor" id="package-install"></a>Install from Package
TDengine is extremely easy to install: it takes only a few seconds from download to successful installation. The server installation package includes clients and connectors. We provide 3 installation packages, which you can choose from according to your actual needs:
Click [here](https://www.taosdata.com/cn/getting-started/#%E9%80%9A%E8%BF%87%E5%AE%89%E8%A3%85%E5%8C%85%E5%AE%89%E8%A3%85) to download the install package.
For more about the installation process, please refer to [TDengine Installation Packages: Install and Uninstall](https://www.taosdata.com/blog/2019/08/09/566.html) and the [Video Tutorials](https://www.taosdata.com/blog/2020/11/11/1941.html).
## <a class="anchor" id="start"></a>Quick Launch
After installation, you can start the TDengine service with the `systemctl` command.
```bash
$ systemctl start taosd
```
Then check if the service is working now.
```bash
$ systemctl status taosd
```
If the service is running successfully, you can play around through TDengine shell `taos`.
**Note:**
- The `systemctl` command needs the **root** privilege. Use **sudo** if you are not the **root** user.
- To get better product feedback and improve our solution, TDengine will collect basic usage information; you can turn this off by setting the configuration parameter **telemetryReporting** in the system configuration file taos.cfg to 0.
- TDengine uses the FQDN (usually the hostname) as the node ID. To ensure normal operation, set the hostname for the server running taosd, and configure a DNS service or a hosts file for the machine running the client application, so that the FQDN can be resolved.
- TDengine supports installation on Linux systems with [systemd](https://en.wikipedia.org/wiki/Systemd) as the process service manager, and uses the `which systemctl` command to detect whether `systemd` packages exist in the system:
  ```bash
  $ which systemctl
  ```
  If `systemd` is not supported in the system, the TDengine service can also be launched manually via `/usr/local/taos/bin/taosd`.
## <a class="anchor" id="console"></a>TDengine Shell Command Line
To launch TDengine shell, the command line interface, in a Linux terminal, type:
```bash
$ taos
```
The welcome message is printed if the shell connects to the TDengine server successfully; otherwise, an error message will be printed (refer to our [FAQ](https://www.taosdata.com/en/faq) page for troubleshooting the connection error). The TDengine shell prompt is:
```cmd
taos>
```
In the TDengine shell, you can create databases, create tables and insert/query data with SQL. Each query command ends with a semicolon. It works like MySQL, for example:
```mysql
create database demo;
use demo;
create table t (ts timestamp, speed int);
insert into t values ('2019-07-15 00:00:00', 10);
insert into t values ('2019-07-15 01:00:00', 20);
select * from t;
          ts          | speed |
===================================
 19-07-15 00:00:00.000|    10|
 19-07-15 01:00:00.000|    20|
Query OK, 2 row(s) in set (0.001700s)
```
Besides the SQL commands, the system administrator can check system status, add or delete accounts, and manage the servers.
### Shell Command Line Parameters
You can configure command parameters to change how TDengine shell executes. Some frequently used options are listed below:
- -c, --config-dir: set the configuration directory. It is */etc/taos* by default.
- -h, --host: set the IP address of the server it will connect to. Default is localhost.
- -s, --commands: set the command to run without entering the shell.
- -u, --user: user name to connect to the server. Default is root.
- -p, --password: password. Default is 'taosdata'.
- -?, --help: get a full list of supported options.
Examples:
```bash
$ taos -h 192.168.0.1 -s "use db; show tables;"
```
### Run SQL Command Scripts
Inside TDengine shell, you can run SQL scripts in a file with source command.
```mysql
taos> source <filename>;
```
### Shell Tips
- Use up/down arrow key to check the command history
- To change the default password, use "alter user" command
- Use ctrl+c to interrupt any queries
- To clean the schema of local cached tables, execute command `RESET QUERY CACHE`
## <a class="anchor" id="demo"></a>Experience TDengine’s Lightning Speed
After starting the TDengine server, you can execute the command `taosdemo` in the Linux terminal.
```bash
$ taosdemo
```
Using this command, a STable named `meters` will be created in the database `test`. There are 10k tables under this STable, named `t0` to `t9999`. Each table has 100k rows of records, and each row has three columns (`f1`, `f2`, and `f3`). Timestamps range from "2017-07-14 10:40:00 000" to "2017-07-14 10:41:39 999". Each table also has the tags `areaid` and `loc`: `areaid` is set from 1 to 10, and `loc` is set to "beijing" or "shanghai".
It takes about 10 minutes to execute this command. Once finished, 1 billion rows of records will be inserted.
In the TDengine client, enter SQL query commands and experience our lightning query speed.
- query the total rows of records:
```mysql
taos> select count(*) from test.meters;
```
- query the average, max, and min of the total 1 billion records:
```mysql
taos> select avg(f1), max(f2), min(f3) from test.meters;
```
- query the number of records where loc="beijing":
```mysql
taos> select count(*) from test.meters where loc="beijing";
```
- query the average, max, and min of the records where areaid=10:
```mysql
taos> select avg(f1), max(f2), min(f3) from test.meters where areaid=10;
```
- query the average, max, and min from table t10, aggregated over every 10s:
```mysql
taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s);
```
**Note**: you can run the command `taosdemo` with many options, such as the number of tables and rows of records. To learn more about these options, execute `taosdemo --help` and try different combinations.
## Client and Alarm Module
If your client and server run on different machines, please install the client separately. Linux and Windows packages are provided:
- TDengine-client-2.0.10.0-Linux-x64.tar.gz(3.0M)
- TDengine-client-2.0.10.0-Windows-x64.exe(2.8M)
- TDengine-client-2.0.10.0-Windows-x86.exe(2.8M)
The Linux package of the Alarm Module is as follows (please refer to [How to Use Alarm Module](https://github.com/taosdata/TDengine/blob/master/alert/README_cn.md)):
- TDengine-alert-2.0.10.0-Linux-x64.tar.gz (8.1M)
## <a class="anchor" id="platforms"></a>List of Supported Platforms
List of platforms supported by TDengine server
| | **CentOS 6/7/8** | **Ubuntu 16/18/20** | **Other Linux** | UnionTech UOS | NeoKylin | LINX V60/V80 |
| ------------------ | ---------------- | ------------------- | --------------- | ------------- | -------- | ------------ |
| X64 | ● | ● | | ○ | ● | ● |
| Raspberry ARM32 | | ● | ● | | | |
| Loongson MIPS64 | | | ● | | | |
| Kunpeng ARM64 | | ○ | ○ | | ● | |
| SWCPU Alpha64 | | | ○ | ● | | |
| FT ARM64 | | ○Ubuntu Kylin | | | | |
| Hygon X64 | ● | ● | ● | ○ | ● | ● |
| Rockchip ARM64/32 | | | ○ | | | |
| Allwinner ARM64/32 | | | ○ | | | |
| Actions ARM64/32 | | | ○ | | | |
| TI ARM32 | | | ○ | | | |
Note: ● has been verified by official tests; ○ has been verified by unofficial tests.
List of platforms supported by TDengine client and connectors
At the moment, TDengine connectors can support a wide range of platforms, including hardware platforms such as X64/X86/ARM64/ARM32/MIPS/Alpha, and development environments such as Linux/Win64/Win32.
The comparison matrix is as follows:
| **CPU** | **X64 64bit** | | | **X86 32bit** | **ARM64** | **ARM32** | **MIPS Godson** | **Alpha Shenwei** | **X64 TimecomTech** |
| ----------- | ------------- | --------- | --------- | ------------- | --------- | --------- | --------------- | ----------------- | ------------------- |
| **OS** | **Linux** | **Win64** | **Win32** | **Win32** | **Linux** | **Linux** | **Linux** | **Linux** | **Linux** |
| **C/C++** | ● | ● | ● | ○ | ● | ● | ● | ● | ● |
| **JDBC** | ● | ● | ● | ○ | ● | ● | ● | ● | ● |
| **Python** | ● | ● | ● | ○ | ● | ● | ● | -- | ● |
| **Go** | ● | ● | ● | ○ | ● | ● | ○ | -- | -- |
| **NodeJs** | ● | ● | ○ | ○ | ● | ● | ○ | -- | -- |
| **C#** | ○ | ● | ● | ○ | ○ | ○ | ○ | -- | -- |
| **RESTful** | ● | ● | ● | ● | ● | ● | ● | ● | ● |
Note: ● has been verified by official tests; ○ has been verified by unofficial tests.
Please visit [Connectors](https://www.taosdata.com/cn/documentation/connector) section for more detailed information.
\ No newline at end of file
(This diff has been collapsed.)
# Efficient Data Writing
TDengine supports multiple interfaces to write data, including SQL, Prometheus, Telegraf, EMQ MQTT Broker, HiveMQ Broker, CSV file, etc. Kafka, OPC, and other interfaces will be provided in the future. Data can be inserted one record at a time or in batches, from one or multiple data collection points at the same time. TDengine supports multi-thread insertion, out-of-order data insertion, and historical data insertion.
## <a class="anchor" id="sql"></a> SQL Writing
Applications insert data by executing SQL insert statements through the C/C++, JDBC, Go, or Python connectors, and users can manually enter SQL insert statements through the TAOS Shell. For example, the following insert writes a record to table d1001:
```mysql
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31);
```
TDengine supports writing multiple records at a time. For example, the following command writes two records to table d1001:
```mysql
INSERT INTO d1001 VALUES (1538548684000, 10.2, 220, 0.23) (1538548696650, 10.3, 218, 0.25);
```
TDengine also supports writing data to multiple tables at a time. For example, the following command writes two records to d1001 and one record to d1002:
```mysql
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) d1002 VALUES (1538548696800, 12.3, 221, 0.31);
```
For the SQL INSERT grammar, please refer to [TAOS SQL insert](https://www.taosdata.com/en/documentation/taos-sql#insert).
**Tips:**
- To improve writing efficiency, write in batches. The more records written in a batch, the higher the insertion efficiency. However, a record cannot exceed 16K, and the total length of an SQL statement cannot exceed 64K (configurable via the parameter maxSQLLength, up to 1M).
- TDengine supports multi-thread parallel writing. To further improve writing speed, a client should open more than 20 threads to write in parallel. Beyond a certain threshold, however, adding threads no longer helps and may even hurt, because too-frequent thread switching brings extra overhead.
- For the same table, if the timestamp of a newly inserted record already exists, the new record is discarded by default (if the database was not created with UPDATE 1); that is, the timestamp must be unique within a table. If an application generates records automatically, it is very likely that some generated timestamps will collide, so the number of records successfully inserted may be smaller than the number the application tried to insert. If the database was created with the UPDATE 1 option, inserting a new record with an existing timestamp overwrites the original record.
- The timestamp of written data must be greater than the current time minus the configuration parameter keep. If keep is configured as 3650 days, data older than 3650 days cannot be written. The timestamp of written data also cannot be greater than the current time plus the configuration parameter days. If days is configured as 2, data more than 2 days later than the current time cannot be written.
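A minimal SQL sketch of the duplicate-timestamp behavior described above (the database and table names are illustrative, and assume a TDengine version that supports the UPDATE option):

```mysql
CREATE DATABASE power UPDATE 1;   -- with UPDATE 1, duplicate timestamps overwrite
USE power;
CREATE TABLE d1001 (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT);
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31);
INSERT INTO d1001 VALUES (1538548685000, 10.5, 220, 0.32);  -- overwrites the row above;
                                                            -- without UPDATE 1 it is discarded
```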
## <a class="anchor" id="prometheus"></a> Direct Writing of Prometheus
As a graduated project of the Cloud Native Computing Foundation, [Prometheus](https://www.prometheus.io/) is widely used for performance monitoring, including K8S performance monitoring. TDengine provides a simple tool, [Bailongma](https://github.com/taosdata/Bailongma), which only needs simple configuration in Prometheus, without any code, to write the data collected by Prometheus directly into TDengine and automatically create databases and related table entries in TDengine according to rules. The blog post [Use Docker Container to Quickly Build a Devops Monitoring Demo](https://www.taosdata.com/blog/2020/02/03/1189.html) is an example of using Bailongma to write Prometheus and Telegraf data into TDengine.
### Compile blm_prometheus From Source
Users need to download the source code of [Bailongma](https://github.com/taosdata/Bailongma) from GitHub, then compile it into an executable using the Golang compiler. Before you start compiling, complete the following preparations:
- A server running Linux OS
- Golang version 1.10 and higher installed
- An appropriate TDengine version. Because TDengine's client dynamic link library is used, the same version as on the server side must be installed; for example, if the server runs TDengine 2.0.0, install the same version on the Linux server where Bailongma is located (it can be the same server as TDengine, or a different one)
The Bailongma project has a folder, blm_prometheus, which holds the Prometheus writing API. The compiling process is as follows:
```bash
cd blm_prometheus
go build
```
If everything goes well, an executable of blm_prometheus will be generated in the corresponding directory.
### Install Prometheus
Download and install as the instruction of Prometheus official website. [Download Address](https://prometheus.io/download/)
### Configure Prometheus
Read the Prometheus [configuration document](https://prometheus.io/docs/prometheus/latest/configuration/configuration/) and add the following configuration to the remote_write section of the Prometheus configuration file:
- url: the URL provided by the Bailongma API service; refer to the blm_prometheus startup example section below
After Prometheus is launched, you can check whether the data is written successfully by querying with the taos client.
### Launch blm_prometheus
blm_prometheus has the following options, which you can configure when you launch it.
```sh
--tdengine-name
If TDengine is installed on a server with a domain name, you can also access TDengine by configuring that domain name. In a K8S environment, it can be configured as the service name that TDengine runs under
--batch-size
blm_prometheus assembles the received prometheus data into a TDengine writing request. This parameter controls the number of data pieces carried in a writing request sent to TDengine at a time.
--dbname
Set a name for the database created in TDengine, blm_prometheus will automatically create a database named dbname in TDengine, and the default value is prometheus.
--dbuser
Set the user name to access TDengine; the default value is 'root'
--dbpassword
Set the password to access TDengine; the default value is 'taosdata'
--port
The port number blm_prometheus used to serve prometheus.
```
### Example
Launch an API service for blm_prometheus with the following command:
```bash
./blm_prometheus -port 8088
```
Assuming that the IP address of the server where blm_prometheus is located is "10.1.2.3", add the URL to the configuration file of Prometheus as:

    remote_write:
      - url: "http://10.1.2.3:8088/receive"
### Query written data of Prometheus
The format of the data generated by Prometheus is as follows:
```json
{
Timestamp: 1576466279341,
Value: 37.000000,
apiserver_request_latencies_bucket {
component="apiserver",
instance="192.168.99.116:8443",
job="kubernetes-apiservers",
le="125000",
resource="persistentvolumes", s
cope="cluster",
verb="LIST",
version=“v1"
}
}
```
Here apiserver_request_latencies_bucket is the name of the time series collected by Prometheus, and the tags of the time series are inside the braces. blm_prometheus automatically creates a STable in TDengine named after the time series, converts the tags in the braces into TDengine tag values, and uses Timestamp as the timestamp and Value as the value of the time series. You can then check in the TDengine client whether this data was successfully written, with the following statements.
```mysql
use prometheus;
select * from apiserver_request_latencies_bucket;
```
## <a class="anchor" id="telegraf"></a> Direct Writing of Telegraf
[Telegraf](https://www.influxdata.com/time-series-platform/telegraf/) is a popular open-source tool for IT operations data collection. TDengine provides a simple tool, [Bailongma](https://github.com/taosdata/Bailongma), which only needs simple configuration in Telegraf, without any code, to write the data collected by Telegraf directly into TDengine and automatically create databases and related table entries in TDengine according to rules. The blog post [Use Docker Container to Quickly Build a Devops Monitoring Demo](https://www.taosdata.com/blog/2020/02/03/1189.html) is an example of using Bailongma to write Prometheus and Telegraf data into TDengine.
### Compile blm_telegraf From Source Code
Users need to download the source code of [Bailongma](https://github.com/taosdata/Bailongma) from GitHub, then compile it into an executable using the Golang compiler. Before you start compiling, complete the following preparations:
- A server running Linux OS
- Golang version 1.10 and higher installed
- An appropriate TDengine version. Because TDengine's client dynamic link library is used, the same version as on the server side must be installed; for example, if the server runs TDengine 2.0.0, install the same version on the Linux server where Bailongma is located (it can be the same server as TDengine, or a different one)
The Bailongma project has a folder, blm_telegraf, which holds the Telegraf writing API. The compiling process is as follows:
```bash
cd blm_telegraf
go build
```
If everything goes well, an executable of blm_telegraf will be generated in the corresponding directory.
### Install Telegraf
At the moment, TDengine supports Telegraf version 1.7.4 and above. Users can download the installation package from Telegraf's website according to their current operating system. The download address is: https://portal.influxdata.com/downloads
### Configure Telegraf
Modify the TDengine-related configurations in the Telegraf configuration file /etc/telegraf/telegraf.conf.
In the output plugins section, add the [[outputs.http]] configuration:
- url: The URL provided by bailongma API service, please refer to the example section below
- data_format: "json"
- json_timestamp_units: "1ms"
In agent section:
- hostname: the machine name that distinguishes different collection devices; it must be unique
- metric_batch_size: 100, the maximum number of records per batch that Telegraf is allowed to write. Increasing this number can reduce the frequency of requests sent by Telegraf.
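Put together, the telegraf.conf entries described above look roughly like this sketch (the hostname is illustrative, and the URL assumes blm_telegraf listens at 10.1.2.3:8089, as in the example further below):

```toml
[agent]
  hostname = "host-001"          # must be unique per collection device
  metric_batch_size = 100        # max records per batch written by Telegraf

[[outputs.http]]
  url = "http://10.1.2.3:8089/telegraf"
  data_format = "json"
  json_timestamp_units = "1ms"
```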
For information on how to use Telegraf to collect data, and more about Telegraf in general, please refer to the official Telegraf [documentation](https://docs.influxdata.com/telegraf/v1.11/).
### Launch blm_telegraf
blm_telegraf has the following options, which can be set when launching to tune its configuration.
```sh
--host
The ip address of TDengine server, default is null
--batch-size
blm_telegraf assembles the received Telegraf data into a TDengine writing request. This parameter controls the number of data pieces carried in a writing request sent to TDengine at a time.
--dbname
Set a name for the database created in TDengine, blm_telegraf will automatically create a database named dbname in TDengine, and the default value is prometheus.
--dbuser
Set the user name to access TDengine; the default value is 'root'
--dbpassword
Set the password to access TDengine; the default value is 'taosdata'
--port
The port number blm_telegraf used to serve Telegraf.
```
### Example
Launch an API service for blm_telegraf with the following command
```bash
./blm_telegraf -host 127.0.0.1 -port 8089
```
Assuming that the IP address of the server where blm_telegraf is located is "10.1.2.3", add the URL to the configuration file of Telegraf as:
```yaml
url = "http://10.1.2.3:8089/telegraf"
```
### Query written data of Telegraf
The format of the data generated by Telegraf is as follows:
```json
{
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 89.7897897897898,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 5.405405405405405,
"usage_user": 4.804804804804805
},
"name": "cpu",
"tags": {
"cpu": "cpu2",
"host": "bogon"
},
"timestamp": 1576464360
}
```
Here the name field is the name of the time series collected by Telegraf, and the tags field holds the tags of the time series. blm_telegraf automatically creates a STable in TDengine named after the time series, converts the tags field into TDengine tag values, and uses timestamp as the timestamp and the fields values as the values of the time series. You can then check in the TDengine client whether this data was successfully written, with the following statements.
```mysql
use telegraf;
select * from cpu;
```
MQTT is a popular data transmission protocol in the IoT. TDengine can easily access the data received by MQTT Broker and write it to TDengine.
## <a class="anchor" id="emq"></a> Direct Writing of EMQ Broker
[EMQ](https://github.com/emqx/emqx) is an open-source MQTT broker; with no coding, just simple configuration of "rules" in the EMQ Dashboard, MQTT data can be written directly into TDengine. EMQ X supports storing data to TDengine by sending it to a web service, and the Enterprise Edition also provides a native TDengine driver for direct data storage. Please refer to the [EMQ official documents](https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine) for more details.
## <a class="anchor" id="hivemq"></a> Direct Writing of HiveMQ Broker
[HiveMQ](https://www.hivemq.com/) is an MQTT broker that comes in a free personal edition and an enterprise edition, mainly used for enterprise and emerging machine-to-machine (M2M) communication and internal transport, meeting requirements for scalability, easy management, and security. HiveMQ provides an open-source plug-in development kit. Data can be stored in TDengine via the HiveMQ extension for TDengine. Refer to the [HiveMQ extension-TDengine documentation](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README.md) for more details.
\ No newline at end of file
# Efficient Data Querying
## <a class="anchor" id="queries"></a> Main Query Features
TDengine uses SQL as its query language. Applications can send SQL statements through the C/C++, Java, Go, and Python connectors, and users can run ad-hoc SQL queries manually through the Command Line Interface (CLI) tool TAOS Shell provided by TDengine. TDengine supports the following query functions:
- Single-column and multi-column data query
- Multiple filters for tags and numeric values: >, <, =, <>, like, etc
- Group by, Order by, Limit/Offset of aggregation results
- The four arithmetic operations on numeric columns and aggregation results
- Time stamp aligned join query (implicit join) operations
- Multiple aggregation/calculation functions: count, max, min, avg, sum, twa, stddev, leastsquares, top, bottom, first, last, percentile, apercentile, last_row, spread, diff, etc
For example, in TAOS Shell, the records with voltage > 215 are queried from table d1001, sorted in descending order by timestamp, and only two records are output.
```mysql
taos> select * from d1001 where voltage > 215 order by ts desc limit 2;
ts | current | voltage | phase |
======================================================================================
2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 |
2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 |
Query OK, 2 row(s) in set (0.001100s)
```
In order to meet the needs of an IoT scenario, TDengine supports several special functions, such as twa (time weighted average), spread (difference between maximum and minimum), last_row (last record), etc. More functions related to IoT scenarios will be added. TDengine also supports continuous queries.
For specific query syntax, please see the [Data Query section of TAOS SQL](https://www.taosdata.com/cn/documentation/taos-sql#select).
## <a class="anchor" id="aggregation"></a> Multi-table Aggregation Query
In an IoT scenario, there are often multiple data collection points of the same type. TDengine uses the concept of a STable to describe a type of data collection point, and an ordinary table to describe a specific one. At the same time, TDengine uses tags to describe the static attributes of data collection points; a given data collection point has specific tag values. By specifying tag filters, TDengine provides an efficient way to aggregate and query the sub-tables of a STable (the data collection points of a certain type). Aggregation functions and most operations on ordinary tables also apply to STables, with exactly the same syntax.
**Example 1**: In TAOS Shell, look up the average voltages collected by all smart meters in Beijing and group them by location
```mysql
taos> SELECT AVG(voltage) FROM meters GROUP BY location;
avg(voltage) | location |
=============================================================
222.000000000 | Beijing.Haidian |
219.200000000 | Beijing.Chaoyang |
Query OK, 2 row(s) in set (0.002136s)
```
**Example 2**: In TAOS Shell, look up the number of records and the maximum current over the past 24 hours across all smart meters whose groupId is 2
```mysql
taos> SELECT count(*), max(current) FROM meters where groupId = 2 and ts > now - 24h;
count(*) | max(current) |
==================================
5 | 13.4 |
Query OK, 1 row(s) in set (0.002136s)
```
TDengine only allows aggregation queries among tables belonging to the same STable; aggregation queries across different STables are not supported. The Data Query section of TAOS SQL indicates for each query operation whether STables are supported.
## <a class="anchor" id="sampling"></a> Down Sampling Query, Interpolation
In IoT scenarios, it is often necessary to aggregate the collected data by time interval through down sampling. TDengine provides a simple keyword, INTERVAL, which makes queries by time window extremely simple. For example, the current values collected by smart meter d1001 are summed every 10 seconds.
```mysql
taos> SELECT sum(current) FROM d1001 INTERVAL(10s);
ts | sum(current) |
======================================================
2018-10-03 14:38:00.000 | 10.300000191 |
2018-10-03 14:38:10.000 | 24.900000572 |
Query OK, 2 row(s) in set (0.000883s)
```
The down sampling operation is also applicable to STables, such as summing the current values collected by all smart meters in Beijing every second.
```mysql
taos> SELECT SUM(current) FROM meters where location like "Beijing%" INTERVAL(1s);
ts | sum(current) |
======================================================
2018-10-03 14:38:04.000 | 10.199999809 |
2018-10-03 14:38:05.000 | 32.900000572 |
2018-10-03 14:38:06.000 | 11.500000000 |
2018-10-03 14:38:15.000 | 12.600000381 |
2018-10-03 14:38:16.000 | 36.000000000 |
Query OK, 5 row(s) in set (0.001538s)
```
The down sampling operation also supports a time offset; for example, summing the current values collected by all smart meters every second, but with each time window starting at 500 milliseconds past the second:
```mysql
taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a);
ts | sum(current) |
======================================================
2018-10-03 14:38:04.500 | 11.189999809 |
2018-10-03 14:38:05.500 | 31.900000572 |
2018-10-03 14:38:06.500 | 11.600000000 |
2018-10-03 14:38:15.500 | 12.300000381 |
2018-10-03 14:38:16.500 | 35.000000000 |
Query OK, 5 row(s) in set (0.001521s)
```
In IoT scenarios, it is difficult to synchronize the timestamps of data collected at different points, yet many analysis algorithms (such as FFT) require data to be strictly aligned at equal time intervals. In many systems, users have to write their own programs for this, whereas TDengine's down sampling operation handles it easily. If no data was collected in a given interval, TDengine also provides an interpolation calculation function to fill the gap.
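For example, a minimal sketch that fills empty 10-second windows with the previous value, assuming the same sample table d1001 (the FILL clause also accepts NULL, LINEAR, or a constant via VALUE):
```mysql
-- down sample to 10s windows and interpolate empty windows with the previous value
SELECT AVG(current) FROM d1001
  WHERE ts >= '2018-10-03 14:38:00' AND ts <= '2018-10-03 14:38:20'
  INTERVAL(10s) FILL(PREV);
```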
For details of syntax rules, please refer to the [Time-dimension Aggregation section of TAOS SQL](https://www.taosdata.com/en/documentation/taos-sql#aggregation).
# Connections with Other Tools
## <a class="anchor" id="grafana"></a> Grafana
TDengine can be quickly integrated with [Grafana](https://www.grafana.com/), an open-source data visualization system, to build a data monitoring and alarm system; the whole process requires no coding. The contents of data tables in TDengine can be visually displayed on a dashboard.
### Install Grafana
TDengine currently supports Grafana 5.2.4 and above. You can download the installation package for your operating system from the Grafana website:
https://grafana.com/grafana/download.
### Configure Grafana
The TDengine Grafana plugin is in the /usr/local/taos/connector/grafanaplugin directory.
Taking CentOS 7.2 as an example, just copy the grafanaplugin directory to the /var/lib/grafana/plugins directory and restart Grafana.
```bash
sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine
```
### Use Grafana
#### Configure data source
You can log in to the Grafana server (username/password: admin/admin) through localhost:3000, and add data sources through `Configuration -> Data Sources` on the left panel, as shown in the following figure:
![img](page://images/connections/add_datasource1.jpg)
Click `Add data source` to enter the Add Data Source page, then type TDengine in the search box and select it, as shown in the following figure:
![img](page://images/connections/add_datasource2.jpg)
Enter the data source configuration page and modify the corresponding configuration according to the default prompt:
![img](page://images/connections/add_datasource3.jpg)
- Host: IP address of any server in the TDengine cluster plus the port number of the TDengine RESTful interface (6041); defaults to [http://localhost:6041](http://localhost:6041/)
- User: TDengine username.
- Password: TDengine user password.
Click `Save & Test`. On success, a prompt appears as follows:
![img](page://images/connections/add_datasource4.jpg)
#### Create Dashboard
Go back to the home page to create a dashboard, and click `Add Query` to enter the panel query page:
![img](page://images/connections/create_dashboard1.jpg)
As shown in the figure above, select the TDengine data source in Query, and enter the SQL to run in the query box below (see the sketch after this list for a variant with a custom template variable). Details are as follows:
- INPUT SQL: enter the statement to query (the result set of the SQL statement should be two columns and multiple rows), for example: `select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)`, where `from`, `to` and `interval` are built-in variables of the TDengine plugin, representing the query range and time interval obtained from the Grafana panel. Custom template variables are also supported in addition to the built-in ones.
- ALIAS BY: you can set an alias for the current query.
- GENERATE SQL: clicking this button automatically substitutes the variables and shows the final statement to be executed.
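For instance, a sketch of a panel query that narrows the same metric with a custom template variable; both the `$dnode` variable and the `dnodeid` column are assumptions for illustration, not plugin built-ins:
```mysql
select avg(mem_system) from log.dn where dnodeid = '$dnode' and ts >= $from and ts < $to interval($interval)
```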
Following the default prompt, query the average system memory usage of the server where TDengine is deployed, at the specified interval, as follows:
![img](page://images/connections/create_dashboard2.jpg)
> Please refer to the Grafana [documentation](https://grafana.com/docs/) for how to create the corresponding monitoring interfaces and for more on using Grafana.
#### Import Dashboard
An importable dashboard, `tdengine-grafana.json`, is provided under the Grafana plugin directory /usr/local/taos/connector/grafana/tdengine/dashboard/.
Click the `Import` button on the left panel and upload the `tdengine-grafana.json` file:
![img](page://images/connections/import_dashboard1.jpg)
After the dashboard is imported, you will see the following:
![img](page://images/connections/import_dashboard2.jpg)
## <a class="anchor" id="matlab"></a> MATLAB
MATLAB can load data into its local workspace by connecting directly to TDengine via the JDBC driver provided in the installation package.
### JDBC Interface Adaptation of MATLAB
Several steps are required to adapt MATLAB to TDengine. Taking MATLAB 2017a on Windows 10 as an example:
- Copy the file JDBCDriver-1.0.0-dist.jar from the TDengine package to the directory ${matlab_root}\MATLAB\R2017a\java\jar\toolbox
- Copy the file taos.lib from the TDengine package to ${matlab_root}\MATLAB\R2017a\lib\win64
- Add the .jar package just copied to the MATLAB classpath, by appending the line below at the end of the file ${matlab_root}\MATLAB\R2017a\toolbox\local\classpath.txt
  ```
  $matlabroot/java/jar/toolbox/JDBCDriver-1.0.0-dist.jar
  ```
- Create a file called javalibrarypath.txt in the directory ${user_home}\AppData\Roaming\MathWorks\MATLAB\R2017a, and add the taos.dll path in that file. For example, if the file taos.dll is in the directory C:\Windows\System32, then add the following line in javalibrarypath.txt:
  ```
  C:\Windows\System32
  ```
### Connect to TDengine in MATLAB to get data
After the above configuration succeeds, open MATLAB.
- Create a connection:
```matlab
conn = database('db', 'root', 'taosdata', 'com.taosdata.jdbc.TSDBDriver', 'jdbc:TSDB://127.0.0.1:0/')
```
- Make a query:
```matlab
sql0 = ['select * from tb'];
data = select(conn, sql0);
```
- Insert a record:
```matlab
sql1 = ['insert into tb values (now, 1)'];
exec(conn, sql1)
```
For more detailed examples, please refer to the examples\Matlab\TDEngineDemo.m file in the package.
## <a class="anchor" id="r"></a> R
The R language supports connecting to the TDengine database through the JDBC interface. First, you need to install the RJDBC package: launch the R environment, then execute the following command to install it:
```R
install.packages('RJDBC', repos='http://cran.us.r-project.org')
```
Once installed, load the RJDBC package by executing the `library('RJDBC')` command.
Then load the TDengine JDBC driver:
```R
drv<-JDBC("com.taosdata.jdbc.TSDBDriver","JDBCDriver-2.0.0-dist.jar", identifier.quote="\"")
```
If it succeeds, no error message is displayed. Then use the following command to try a database connection:
```R
conn<-dbConnect(drv,"jdbc:TSDB://192.168.0.1:0/?user=root&password=taosdata","root","taosdata")
```
Please replace the IP address in the command above with the correct one. If no error message is shown, the connection has been established successfully; otherwise, adjust the connection command according to the error prompt. TDengine supports the following functions in the *RJDBC* package:
- `dbWriteTable(conn, "test", iris, overwrite=FALSE, append=TRUE)`: write the data in the data frame iris to the table test on the TDengine server. The parameter `overwrite` must be FALSE and `append` must be TRUE, and the schema of the data frame iris should match that of the table test.
- `dbGetQuery(conn, "select count(*) from test")`: run a query command.
- `dbSendUpdate(conn, "use db")`: execute any non-query SQL statement, e.g. switching databases with `dbSendUpdate(conn, "use db")` or writing data with `dbSendUpdate(conn, "insert into t1 values (now, 99)")`.
- `dbReadTable(conn, "test")`: read all the data in table test.
- `dbDisconnect(conn)`: close a connection.
- `dbRemoveTable(conn, "test")`: remove table test.
The functions below are not supported currently:
- `dbExistsTable(conn, "test")`: check whether table test exists.
- `dbListTables(conn)`: list all tables in the connection.
......@@ -266,6 +266,7 @@ typedef struct SSqlObj {
typedef struct SSqlStream {
SSqlObj *pSql;
void * cqhandle; // stream belong to SCQContext handle
const char* dstTable;
uint32_t streamId;
char listed;
......@@ -320,7 +321,7 @@ int32_t tscCreateResPointerInfo(SSqlRes *pRes, SQueryInfo *pQueryInfo);
void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo);
void tscSetResRawPtrRv(SSqlRes* pRes, SQueryInfo* pQueryInfo, SSDataBlock* pBlock);
void handleDownstreamOperator(SSqlObj** pSqlList, int32_t numOfUpstream, SQueryInfo* px, SSqlRes* pOutput);
void handleDownstreamOperator(SSqlObj** pSqlList, int32_t numOfUpstream, SQueryInfo* px, SSqlObj* pParent);
void destroyTableNameList(SInsertStatementParam* pInsertParam);
void tscResetSqlCmd(SSqlCmd *pCmd, bool removeMeta);
......
......@@ -323,7 +323,7 @@ TAOS_ROW tscFetchRow(void *param) {
// current data set are exhausted, fetch more data from node
if (pRes->row >= pRes->numOfRows && (pRes->completed != true || hasMoreVnodesToTry(pSql) || hasMoreClauseToTry(pSql)) &&
(pCmd->command == TSDB_SQL_RETRIEVE ||
pCmd->command == TSDB_SQL_RETRIEVE_LOCALMERGE ||
pCmd->command == TSDB_SQL_RETRIEVE_GLOBALMERGE ||
pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE ||
pCmd->command == TSDB_SQL_FETCH ||
pCmd->command == TSDB_SQL_SHOW ||
......