Commit 33c6b5ce authored by Cary Xu

Merge branch 'develop' into feature/TD-4666

......@@ -23,7 +23,7 @@ TDengine is a big-data platform designed and optimized by TAOS Data for IoT, Connected Vehicles, Industrial IoT, and IT operations and monitoring
TDengine is an efficient platform for storing, querying, and analyzing time-series big data, designed and optimized for IoT, Connected Vehicles, Industrial IoT, operations monitoring, and similar scenarios. You can use it the way you would use the relational database MySQL, but we recommend reading the documentation below carefully before you start, especially the [Data Model](https://www.taosdata.com/cn/documentation/architecture) and [Data Modeling](https://www.taosdata.com/cn/documentation/model) chapters. Beyond this documentation, you are also welcome to [download the product white paper](https://www.taosdata.com/downloads/TDengine%20White%20Paper.pdf).
-# Generate
+# Build
The TDengine 2.0 server currently installs and runs only on Linux; Windows, macOS, and other systems will be supported later. The client can be installed and run on Windows or Linux. Applications on any OS can also connect to the server taosd through the RESTful interface. Supported CPUs are X64/ARM64/MIPS64/Alpha64, with ARM32, RISC-V, and other architectures to follow. Users can choose to install from [source](https://www.taosdata.com/cn/getting-started/#通过源码安装) or from an [installation package](https://www.taosdata.com/cn/getting-started/#通过安装包安装). This quick guide applies only to installing from source.
......@@ -107,7 +107,7 @@ The Go connector and Grafana plugin live in separate repositories; if you want to install them
git submodule update --init --recursive
```
-## Generate TDengine
+## Build TDengine
### On Linux
......@@ -116,6 +116,12 @@ mkdir debug && cd debug
cmake .. && cmake --build .
```
+You can optionally use Jemalloc as the memory allocator in place of the default glibc:
+```bash
+apt install autoconf
+cmake .. -DJEMALLOC_ENABLED=true
+```
On the X86-64, X86, arm64, arm32, and mips64 platforms, the TDengine build script can detect the machine architecture automatically. You can also set the CPUTYPE parameter manually to specify the CPU type, such as aarch64 or aarch32.
aarch64:
......
......@@ -110,6 +110,12 @@ mkdir debug && cd debug
cmake .. && cmake --build .
```
+You can use Jemalloc as the memory allocator instead of glibc:
+```
+apt install autoconf
+cmake .. -DJEMALLOC_ENABLED=true
+```
The TDengine build script can detect the host machine's architecture on the X86-64, X86, arm64, arm32, and mips64 platforms.
You can also specify the CPUTYPE option, such as aarch64 or aarch32, if the detection result is not correct:
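For reference, a minimal sketch of such a manual build; the concrete invocation in this document is elided below, and the exact option spelling may vary across versions:

```bash
# Assumed form: pass the target CPU type explicitly when auto-detection fails.
mkdir debug && cd debug
cmake .. -DCPUTYPE=aarch64 && cmake --build .
```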
......
......@@ -57,7 +57,7 @@ IF (TD_LINUX_64)
ADD_DEFINITIONS(-D_M_X64)
ADD_DEFINITIONS(-D_TD_LINUX_64)
MESSAGE(STATUS "linux64 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ADD_DEFINITIONS(-DUSE_LIBICONV)
IF (JEMALLOC_ENABLED)
......@@ -70,7 +70,7 @@ IF (TD_LINUX_32)
ADD_DEFINITIONS(-D_TD_LINUX_32)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "linux32 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -munaligned-access -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -munaligned-access -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_ARM_64)
......@@ -78,7 +78,7 @@ IF (TD_ARM_64)
ADD_DEFINITIONS(-D_TD_ARM_)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "arm64 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_ARM_32)
......@@ -86,7 +86,7 @@ IF (TD_ARM_32)
ADD_DEFINITIONS(-D_TD_ARM_)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "arm32 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast -Wno-incompatible-pointer-types ")
SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast -Wno-incompatible-pointer-types ")
ENDIF ()
IF (TD_MIPS_64)
......@@ -94,7 +94,7 @@ IF (TD_MIPS_64)
ADD_DEFINITIONS(-D_TD_MIPS_64)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "mips64 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_MIPS_32)
......@@ -102,7 +102,7 @@ IF (TD_MIPS_32)
ADD_DEFINITIONS(-D_TD_MIPS_32)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "mips32 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-Wall -Werror -fPIC -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_APLHINE)
......@@ -147,7 +147,7 @@ IF (TD_DARWIN_64)
ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "darwin64 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -Wno-missing-braces -fPIC -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-Wall -Werror -Wno-missing-braces -fPIC -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
IF (TD_MEMORY_SANITIZER)
SET(DEBUG_FLAGS "-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment -O0 -g3 -DDEBUG")
ELSE ()
......
......@@ -35,13 +35,13 @@ ENDIF ()
# Set compiler options
+SET(COMMON_C_FLAGS "${COMMON_FLAGS} -std=gnu99")
-SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${COMMON_FLAGS} ${DEBUG_FLAGS}")
-SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${COMMON_FLAGS} ${RELEASE_FLAGS}")
+SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${COMMON_C_FLAGS} ${DEBUG_FLAGS}")
+SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${COMMON_C_FLAGS} ${RELEASE_FLAGS}")
# Set c++ compiler options
-# SET(COMMON_CXX_FLAGS "${COMMON_FLAGS} -std=c++11")
-# SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${COMMON_CXX_FLAGS} ${DEBUG_FLAGS}")
-# SET(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${COMMON_CXX_FLAGS} ${RELEASE_FLAGS}")
+SET(COMMON_CXX_FLAGS "${COMMON_FLAGS} -std=c++11 -Wno-unused-function")
+SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${COMMON_CXX_FLAGS} ${DEBUG_FLAGS}")
+SET(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${COMMON_CXX_FLAGS} ${RELEASE_FLAGS}")
IF (${CMAKE_BUILD_TYPE} MATCHES "Debug")
SET(CMAKE_BUILD_TYPE "Debug")
......
......@@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)
-INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.30.jar DESTINATION connector/jdbc)
+INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.32-dist.jar DESTINATION connector/jdbc)
ENDIF ()
ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
......
......@@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
-SET(TD_VER_NUMBER "2.1.1.0")
+SET(TD_VER_NUMBER "2.1.3.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
......
......@@ -15,7 +15,7 @@ TDengine is an efficient platform for storing, querying, and analyzing time-series big data, designed
* [The TAOS command line](/getting-started#console): the easiest way to access TDengine
* [Experience lightning speed](/getting-started#demo): run the sample programs and quickly experience efficient data insertion and queries
* [List of supported platforms](/getting-started#platforms): the platforms supported by the TDengine server and client
-* [Kubenetes deployment](https://taosdata.github.io/TDengine-Operator/zh/index.html): detailed instructions for deploying TDengine in a Kubenetes environment
+* [Kubernetes deployment](https://taosdata.github.io/TDengine-Operator/zh/index.html): detailed instructions for deploying TDengine in a Kubernetes environment
## [Overall Architecture](/architecture)
......@@ -42,7 +42,7 @@ TDengine is an efficient platform for storing, querying, and analyzing time-series big data, designed
* [Data ingestion](/taos-sql#insert): supports writing single or multiple records to a single table, multiple records to multiple tables, and historical data
* [Data querying](/taos-sql#select): supports time ranges, value filtering, sorting, manual paging of query results, and more
* [SQL functions](/taos-sql#functions): supports aggregation functions, selection functions, and computation functions such as avg, min, diff
-* [Time-dimension aggregation](/taos-sql#aggregation): slice the data in a table by time window and then aggregate it, for dimensionality reduction
+* [Window-partitioned aggregation](/taos-sql#aggregation): slice the data in a table by time window or other criteria and then aggregate it, for dimensionality reduction
* [Boundary limits](/taos-sql#limitation): boundary limits for databases, tables, SQL, and more
* [Error codes](/taos-sql/error-code): TDengine 2.0 error codes and their decimal equivalents
......@@ -63,7 +63,7 @@ TDengine is an efficient platform for storing, querying, and analyzing time-series big data, designed
## [Advanced Features](/advanced-features)
* [Continuous Query](/advanced-features#continuous-query): time-driven, automatically recurring query computation over data streams, based on sliding windows
-* [Data Subscription (Publisher/Subscriber)](/advanced-features#subscribe): a typical message queue; applications can subscribe to newly arriving data
+* [Data Subscription (Publisher/Subscriber)](/advanced-features#subscribe): similar to a typical message queue; applications can subscribe to newly arriving data
* [Cache](/advanced-features#cache): the latest data of every device is cached in memory and can be retrieved quickly
* [Alert Monitoring](/advanced-features#alert): automatically detects out-of-bound values according to configured rules and proactively pushes alerts
......@@ -81,7 +81,7 @@ TDengine is an efficient platform for storing, querying, and analyzing time-series big data, designed
## [Connections with Other Tools](/connections)
* [Grafana](/connections#grafana): retrieve and visualize the data stored in TDengine
-* [Matlab](/connections#matlab): access data stored in TDengine by configuring a JDBC data source in Matlab
+* [MATLAB](/connections#matlab): access data stored in TDengine by configuring a JDBC data source in MATLAB
* [R](/connections#r): access data stored in TDengine by configuring a JDBC data source in R
* [IDEA Database](https://www.taosdata.com/blog/2020/08/27/1767.html): use TDengine visually through the IDEA database management tool
......@@ -106,6 +106,7 @@ TDengine is an efficient platform for storing, querying, and analyzing time-series big data, designed
* [Data import](/administrator#import): import from script files or from data files
* [Data export](/administrator#export): export table by table from the shell, or use the taosdump tool for various kinds of export
* [System monitoring](/administrator#status): inspect the system's existing connections, queries, stream computations, logs, events, and more
+* [Performance optimization](/administrator#optimize): maintain and optimize long-running systems to keep them performing well
* [File directory structure](/administrator#directories): the directories where TDengine data files, configuration files, and others are located
* [Parameter limits and reserved keywords](/administrator#keywords): TDengine's parameter limits and the list of reserved keywords
......
......@@ -9,8 +9,8 @@ One of TDengine's modules is a time-series database. Beyond that, to reduce development
* __Performance improvement of more than 10x__: with an innovative data storage structure, a single core can handle at least 20,000 requests per second, insert millions of data points, and read more than 10 million data points, over ten times faster than existing general-purpose databases.
* __Hardware or cloud-service cost reduced to 1/5__: thanks to this performance, the computing resources needed are less than 1/5 of a typical big-data solution; with columnar storage and advanced compression algorithms, storage consumption is less than 1/10 of a general-purpose database.
* __Full-stack time-series data processing engine__: database, message queue, cache, and stream computing are integrated in one product, so applications no longer need to integrate Kafka/Redis/HBase/Spark/HDFS, which greatly reduces the complexity and cost of application development and maintenance.
-* __Powerful analysis features__: data from ten years ago or one second ago can be queried simply by specifying a time range. Data can be aggregated along the time axis or across multiple devices. Ad-hoc queries can be run at any time via Shell, Python, R, or Matlab.
-* __Seamless integration with third-party tools__: without a single line of code, TDengine integrates with Telegraf, Grafana, EMQ, HiveMQ, Prometheus, Matlab, R, and more. OPC, Hadoop, Spark, and others will follow, and BI tools will also connect seamlessly.
+* __Powerful analysis features__: data from ten years ago or one second ago can be queried simply by specifying a time range. Data can be aggregated along the time axis or across multiple devices. Ad-hoc queries can be run at any time via Shell, Python, R, or MATLAB.
+* __Seamless integration with third-party tools__: without a single line of code, TDengine integrates with Telegraf, Grafana, EMQ, HiveMQ, Prometheus, MATLAB, R, and more. OPC, Hadoop, Spark, and others will follow, and BI tools will also connect seamlessly.
* __Zero operations cost, zero learning cost__: installing a cluster is quick and simple, there is no need to split databases or tables, and backup is real-time. The syntax is similar to standard SQL, with RESTful and Python/Java/C/C++/C#/Go/Node.js support; similar to MySQL, the learning cost is zero.
Adopting TDengine can greatly reduce the total cost of ownership of a typical IoT, Connected Vehicles, or Industrial IoT big-data platform. Note, however, that because it fully exploits the characteristics of IoT time-series data, TDengine cannot be used to process general-purpose data from web crawlers, microblogs, WeChat, e-commerce, ERP, CRM, and similar sources.
......
......@@ -49,6 +49,7 @@ TDengine's JDBC driver implementation stays as consistent as possible with relational database drivers
</tr>
</table>
+Note: unlike the JNI mode, the RESTful interface is stateless, so the `USE db_name` command has no effect; under RESTful, every reference to a table or STable name must be prefixed with its database name.
## Obtaining taos-jdbcdriver
......@@ -531,8 +532,9 @@ Query OK, 1 row(s) in set (0.000141s)
| taos-jdbcdriver version | TDengine version | JDK version |
| -------------------- | ----------------- | -------- |
-| 2.0.22 | 2.0.18.0 and above | 1.8.x |
-| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.0 | 1.8.x |
+| 2.0.31 | 2.1.3.0 and above | 1.8.x |
+| 2.0.22 - 2.0.30 | 2.0.18.0 - 2.1.2.x | 1.8.x |
+| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.x | 1.8.x |
| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x |
| 1.0.3 | 1.6.1.x and above | 1.8.x |
| 1.0.2 | 1.6.1.x and above | 1.8.x |
......@@ -551,7 +553,7 @@ TDengine currently supports timestamp, numeric, character, and boolean types, which map to Java types as follows:
| BIGINT | java.lang.Long |
| FLOAT | java.lang.Float |
| DOUBLE | java.lang.Double |
| SMALLINT | java.lang.Short |
| TINYINT | java.lang.Byte |
| BOOL | java.lang.Boolean |
| BINARY | byte array |
......
......@@ -56,7 +56,7 @@ TDengine provides a rich set of application development interfaces, including C/C++, Java,
*taos.tar.gz*: the application driver installation package
*driver*: the TDengine application driver
*connector*: connectors for various programming languages (go/grafanaplugin/nodejs/python/JDBC)
-*examples*: sample programs for various programming languages (c/C#/go/JDBC/matlab/python/R)
+*examples*: sample programs for various programming languages (c/C#/go/JDBC/MATLAB/python/R)
Run install_client.sh to install.
......@@ -427,12 +427,15 @@ TDengine provides a time-driven real-time stream computing API that, at fixed intervals,
* res: the query result set; note that it may contain no records
* param: the extra parameter supplied by the client program when `taos_subscribe` was called
* code: the error code
+**Note**: do not perform long-running work inside this callback, especially when the returned result set contains a lot of data, or the client may block or enter other abnormal states. If complex computation is necessary, hand it off to a separate thread.
* `TAOS_RES *taos_consume(TAOS_SUB *tsub)`
In synchronous mode, this function fetches the subscription results, and the application typically places it inside a loop. If the interval between two `taos_consume` calls is shorter than the subscription's polling period, the API blocks until the interval exceeds that period. If the database has new records, the API returns them; otherwise it returns an empty result set with no records. A return value of `NULL` indicates a system error. In asynchronous mode, the application must not call this API.
+**Note**: after calling `taos_consume()`, the application should call `taos_fetch_row()` or `taos_fetch_block()` to process the subscription results as soon as possible; otherwise the server keeps caching the query results while waiting for the client to read them, which in extreme cases exhausts server memory and harms service stability.
* `void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress)`
Cancels the subscription. If the parameter `keepProgress` is non-zero, the API keeps the subscription's progress information, so a later `taos_subscribe` call can resume from that point; otherwise the progress information is deleted and the data can only be read again from the beginning.
......@@ -554,6 +557,13 @@ c1.close()
conn.close()
```
+#### A note on nanosecond (nanosecond) precision in the Python connector
+Because Python's support for nanoseconds is still incomplete (see links 1 and 2 below), the current implementation returns an integer at nanosecond precision, rather than the datetime type returned at ms and us precision. Application developers need to handle this themselves; we suggest using pandas' to_datetime(). If Python gains full nanosecond support in the future, TAOS Data may adjust the relevant interfaces.
+1. https://stackoverflow.com/questions/10611328/parsing-datetime-strings-containing-nanoseconds
+2. https://www.python.org/dev/peps/pep-0564/
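A minimal conversion sketch, assuming pandas is installed and that the connector returned an integer epoch value in nanoseconds (the literal below is a made-up sample):

```bash
# Convert an integer epoch-nanosecond value to a pandas Timestamp.
python3 -c 'import pandas as pd; print(pd.to_datetime(1609430400123456789, unit="ns"))'
```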
#### Help information
Users can view module usage directly through Python's built-in help, or consult the sample programs under tests/examples/python. Some commonly used classes and methods:
......@@ -585,7 +595,9 @@ conn.close()
## <a class="anchor" id="restful"></a>RESTful Connector
To support development on many different platforms, TDengine provides an API that follows the REST design standard: the RESTful API. To minimize the learning cost, and unlike the RESTful API designs of other databases, TDengine operates the database directly through the SQL statement carried in the BODY of an HTTP POST request, so only a URL is required. See the [video tutorial](https://www.taosdata.com/blog/2020/11/11/1965.html) for how to use the RESTful connector.
+Note: one difference from the standard connectors is that the RESTful interface is stateless, so the `USE db_name` command has no effect; every reference to a table or STable name must be prefixed with its database name.
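A quick way to observe this from the command line; a sketch assuming the default RESTful port (6041), the default credentials (root/taosdata), and a hypothetical table db.tb:

```bash
# The fully qualified name db.tb is required because the interface is stateless.
curl -u root:taosdata -d 'SELECT COUNT(*) FROM db.tb;' http://localhost:6041/rest/sql
```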
### HTTP request format
......@@ -803,7 +815,7 @@ The C# connector supports the following systems: Linux 64/Windows x64/Windows x86
* For driver installation, see [Steps to install the connector driver](https://www.taosdata.com/cn/documentation/connector#driver)
* The .NET interface file TDengineDrivercs.cs and the reference sample TDengineTest.cs are both located under the install_directory/examples/C# directory of the Windows client.
-* On Windows, C# applications can use TDengine's native C interface to perform all database operations; a later release will provide an ORM (dapper) framework driver.
+* On Windows, C# applications can use TDengine's native C interface to perform all database operations; a later release will provide an ORM (Dapper) framework driver.
### Installation verification
......@@ -895,11 +907,15 @@ go env -w GOPROXY=https://goproxy.io,direct
A built-in method of sql.Open; Close closes the statement.
### Other sample code
+[Consume Messages from Kafka](https://github.com/taosdata/go-demo-kafka) is a sample program in Go that consumes a Kafka queue and writes the data into TDengine; it can also serve as a reference for connecting to TDengine from Go.
## <a class="anchor" id="nodejs"></a>Node.js Connector
The Node.js connector supports the following systems:
| **CPU type** | x64 (64-bit) | | | aarch64 | aarch32 |
| ------------ | ------------ | -------- | -------- | -------- | -------- |
| **OS type** | Linux | Win64 | Win32 | Linux | Linux |
| **Supported** | **Yes** | **Yes** | **Yes** | **Yes** | **Yes** |
......
......@@ -75,50 +75,45 @@ sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tde
![img](page://images/connections/import_dashboard2.jpg)
## <a class="anchor" id="matlab"></a>Matlab
## <a class="anchor" id="matlab"></a>MATLAB
MatLab可以通过安装包内提供的JDBC Driver直接连接到TDengine获取数据到本地工作空间。
MATLAB 可以通过安装包内提供的 JDBC Driver 直接连接到 TDengine 获取数据到本地工作空间。
### MatLab的JDBC接口适配
### MATLAB 的 JDBC 接口适配
MatLab的适配有下面几个步骤,下面以Windows10上适配MatLab2017a为例:
MATLAB 的适配有下面几个步骤,下面以 Windows 10 上适配 MATLAB2021a 为例:
-- Copy the driver JDBCDriver-1.0.0-dist.jar from the TDengine installation package to ${matlab_root}\MATLAB\R2017a\java\jar\toolbox
-- Copy the taos.lib file from the TDengine installation package to ${matlab_root_dir}\MATLAB\R2017a\lib\win64
-- Add the newly added driver jar to MatLab's classpath by appending the following line to the file ${matlab_root_dir}\MATLAB\R2017a\toolbox\local\classpath.txt
+- Copy the driver `taos-jdbcdriver-2.0.25-dist.jar` from `\TDengine\connector\jdbc` under the TDengine client installation path to `${matlab_root}\MATLAB\R2021a\java\jar\toolbox`
+- Copy the `taos.lib` file from the TDengine installation package to `${matlab_root_dir}\MATLAB\R2021a\lib\win64`
+- Add the newly added driver jar to MATLAB's classpath by appending the following line to the file `${matlab_root_dir}\MATLAB\R2021a\toolbox\local\classpath.txt`:
```
$matlabroot/java/jar/toolbox/JDBCDriver-1.0.0-dist.jar
$matlabroot/java/jar/toolbox/taos-jdbcdriver-2.0.25-dist.jar
```
-- Create a file javalibrarypath.txt under ${user_home}\AppData\Roaming\MathWorks\MATLAB\R2017a\ and add the path of taos.dll to it; for example, if taos.dll was copied into C:\Windows\System32 during installation, add the following line to javalibrarypath.txt:
+- Create a file `javalibrarypath.txt` under `${user_home}\AppData\Roaming\MathWorks\MATLAB\R2021a\` and add the path of taos.dll to it; for example, if your taos.dll was copied into `C:\Windows\System32` during installation, add the following line to `javalibrarypath.txt`:
```
C:\Windows\System32
```
-### Connecting to TDengine from MatLab to fetch data
+### Connecting to TDengine from MATLAB to fetch data
-After the configuration above succeeds, open MatLab:
+After the configuration above succeeds, open MATLAB:
- Create a connection:
```matlab
-conn = database(db, root, taosdata, com.taosdata.jdbc.TSDBDriver, jdbc:TSDB://127.0.0.1:0/)
+conn = database('test', 'root', 'taosdata', 'com.taosdata.jdbc.TSDBDriver', 'jdbc:TSDB://192.168.1.94:6030/')
```
- Run a query:
```matlab
sql0 = ['select * from tb']
data = select(conn, sql0);
```
- Insert a record:
```matlab
sql1 = ['insert into tb values (now, 1)']
exec(conn, sql1)
```
-For more detailed examples, see the file examples\Matlab\TDengineDemo.m in the installation package.
+For more detailed examples, see the file `examples\Matlab\TDengineDemo.m` in the installation package.
## <a class="anchor" id="r"></a>R
......
......@@ -85,7 +85,7 @@ taos>
Adding subsequent data nodes to an existing cluster involves the following steps:
-1. Start taosd on each physical node as described in the [Getting Started](https://www.taosdata.com/cn/documentation/getting-started/) chapter;
+1. Start taosd on each physical node as described in the [Getting Started](https://www.taosdata.com/cn/documentation/getting-started/) chapter; (note: every physical node must set the firstEP parameter in its taos.cfg to the End Point of the first node of the new cluster, h1.taos.com:6030 in this example)
2. On the first data node, log in to TDengine with the CLI program taos and execute the command:
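The command itself is elided from this diff; for illustration, it would take roughly the following form (the FQDN below stands for this example's second node and is an assumption):

```bash
# Register the new data node with the cluster from the first node's shell.
taos -s 'CREATE DNODE "h2.taos.com:6030";'
```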
......
......@@ -116,20 +116,22 @@ taosd -C
**Note:** TDengine uses 13 consecutive TCP and UDP port numbers starting from serverPort; be sure to open them in the firewall. With the default configuration, that means opening the 13 ports from 6030 through 6042, for both TCP and UDP. (For details, see [TDengine 2.0 Port Description](https://www.taosdata.com/cn/documentation/faq#port).)
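For example, a sketch assuming the default serverPort (6030) and a host managed with the ufw firewall; adapt it to firewalld or iptables as appropriate:

```bash
# Open the 13 default TDengine ports for both TCP and UDP.
sudo ufw allow 6030:6042/tcp
sudo ufw allow 6030:6042/udp
```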
-Data in different application scenarios often has different characteristics: retention days, replica count, collection frequency, record size, number of collection points, compression, and so on can all differ. To achieve the best storage efficiency, TDengine provides the following storage-related system configuration parameters:
-- days: the time span of the data stored in one data file, in days. Default: 10.
-- keep: the number of days data is retained in the database, in days. Default: 3650. (Modifiable via alter database.)
-- minRows: the minimum number of records in a file block. Default: 100.
-- maxRows: the maximum number of records in a file block. Default: 4096.
-- comp: the file compression flag. 0: off; 1: one-stage compression; 2: two-stage compression. Default: 2. (Modifiable via alter database.)
-- walLevel: the WAL level. 1: write the WAL but do not execute fsync; 2: write the WAL and execute fsync. Default: 1.
-- fsync: when wal is set to 2, the period at which fsync is executed. Setting it to 0 means fsync on every write. In milliseconds, default: 3000.
-- cache: the size of a memory block, in megabytes (MB). Default: 16.
+Data in different application scenarios often has different characteristics: retention days, replica count, collection frequency, record size, number of collection points, compression, and so on can all differ. To achieve the best storage efficiency, TDengine provides the following storage-related system configuration parameters (they can be given as parameters of the create database statement, or written into the taos.cfg configuration file to set the defaults used when new databases are created):
+- days: the time span of the data stored in one data file, in days. Default: 10.
+- keep: the number of days data is retained in the database, in days. Default: 3650. (Modifiable via alter database.)
+- minRows: the minimum number of records in a file block. Default: 100.
+- maxRows: the maximum number of records in a file block. Default: 4096.
+- comp: the file compression flag. 0: off; 1: one-stage compression; 2: two-stage compression. Default: 2. (Modifiable via alter database.)
+- wal: the WAL level. 1: write the WAL but do not execute fsync; 2: write the WAL and execute fsync. Default: 1. (In taos.cfg the parameter name must be written as walLevel.) (Modifiable via alter database.)
+- fsync: when wal is set to 2, the period at which fsync is executed. Setting it to 0 means fsync on every write. In milliseconds, default: 3000. (Modifiable via alter database.)
+- cache: the size of a memory block, in megabytes (MB). Default: 16.
- blocks: the number of cache-sized memory blocks in each VNODE (TSDB), so a VNODE uses roughly (cache * blocks) memory. In blocks, default: 4. (Modifiable via alter database.)
-- replica: the number of replicas, in the range 1-3. Default: 1. (Modifiable via alter database.)
-- precision: the timestamp precision flag; ms means milliseconds and us means microseconds. Default: ms.
-- cacheLast: whether the most recent data of sub-tables is cached in memory. 0: off; 1: cache the most recent row of each sub-table; 2: cache the most recent non-NULL value of each column of each sub-table; 3: cache both recent rows and columns. Default: 0. (Modifiable via alter database.) (Supported since version 2.0.11.)
+- replica: the number of replicas. Range: 1-3. Default: 1. (Modifiable via alter database.)
+- quorum: the number of confirmations required for command execution in a multi-replica environment. Range: 1 or 2. Default: 1. (Modifiable via alter database.)
+- precision: the timestamp precision flag. ms means milliseconds, us means microseconds. Default: ms. (Before versions 2.1.2.0 and 2.0.20.7, this parameter was not supported in the taos.cfg file.)
+- cacheLast: whether the most recent data of sub-tables is cached in memory. 0: off; 1: cache the most recent row of each sub-table; 2: cache the most recent non-NULL value of each column of each sub-table; 3: cache both recent rows and columns. Default: 0. (Modifiable via alter database.) (The value range 0-3 is supported since version 2.1.2.0; before that only [0, 1] was accepted, and versions before 2.0.11.0 did not support this parameter in SQL statements.) (Before versions 2.1.2.0 and 2.0.20.7, this parameter was not supported in the taos.cfg file.)
- update: whether updates are allowed. 0: not allowed; 1: allowed. Default: 0.
A single application scenario may have several kinds of data with different characteristics coexisting. The best design is to put tables with the same data characteristics into one database. An application can therefore have multiple databases, each configured with different storage parameters, so that the system always performs optimally. When creating a database, an application can specify any of the above storage parameters; if specified, these values override the corresponding system configuration defaults. For example, with the following SQL:
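The SQL statement itself is elided in this diff; a representative sketch (the database name and parameter values are illustrative only):

```bash
# Create a database that overrides several storage defaults at creation time.
taos -s "CREATE DATABASE demo DAYS 10 CACHE 32 BLOCKS 8 REPLICA 3 UPDATE 1;"
```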
......@@ -142,7 +144,6 @@ taosd -C
When a new dnode joins a TDengine cluster, a number of cluster-related parameters must match the existing cluster's configuration, otherwise the node cannot join. The following parameters are checked:
- numOfMnodes: the number of management nodes in the system. Default: 3.
-- balance: whether load balancing is enabled. 0: no; 1: yes. Default: 1.
- mnodeEqualVnodeNum: the number of vnodes one mnode is counted as equivalent to. Default: 4.
- offlineThreshold: the dnode offline threshold; exceeding this duration causes the dnode to be removed from the cluster. In seconds, default: 86400*10 (i.e., 10 days).
- statusInterval: the interval at which a dnode reports its status to the mnode. In seconds, default: 1.
......@@ -150,6 +151,10 @@ When a new dnode joins a TDengine cluster, some cluster-related parameters
- maxVgroupsPerDb: the maximum number of vgroups that can be used per database.
- arbitrator: the end point of the arbitrator in the system; empty by default.
- For timezone, locale, and charset, see the client configuration. (Since version 2.0.20.0, new nodes joining a cluster are no longer required to have matching locale and charset values.)
+- balance: whether load balancing is enabled. 0: no; 1: yes. Default: 1.
+- flowctrl: whether non-blocking flow control is enabled. 0: no; 1: yes. Default: 1.
+- slaveQuery: whether slave vnodes participate in queries. 0: no; 1: yes. Default: 1.
+- adjustMaster: whether vnode master load balancing is enabled. 0: no; 1: yes. Default: 1.
For easier debugging, the log configuration of each dnode can be temporarily adjusted via SQL statements; such changes are lost after a system restart:
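The statement itself is elided in this diff; a representative sketch (the dnode id and flag value are illustrative):

```bash
# Temporarily raise the debug log level of dnode 1; reverts after a restart.
taos -s "ALTER DNODE 1 debugFlag 135;"
```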
......@@ -413,6 +418,19 @@ TDengine启动后,会自动创建一个监测数据库log,并自动将服务
Collection of these monitoring metrics is enabled by default, but you can change the enableMonitor option in the configuration file to turn it off or on.
<a class="anchor" id="optimize"></a>
## 性能优化
因数据行 [update](https://www.taosdata.com/cn/documentation/faq#update)、表删除、数据过期等原因,TDengine 的磁盘存储文件有可能出现数据碎片,影响查询操作的性能表现。从 2.1.3.0 版本开始,新增 SQL 指令 COMPACT 来启动碎片重整过程:
```mysql
COMPACT VNODES IN (vg_id1, vg_id2, ...)
```
COMPACT 命令对指定的一个或多个 VGroup 启动碎片重整,系统会通过任务队列尽快安排重整操作的具体执行。COMPACT 指令所需的 VGroup id,可以通过 `SHOW VGROUPS;` 指令的输出结果获取;而且在 `SHOW VGROUPS;` 中会有一个 compacting 列,值为 1 时表示对应的 VGroup 正在进行碎片重整,为 0 时则表示并没有处于重整状态。
需要注意的是,碎片重整操作会大幅消耗磁盘 I/O。因此在重整进行期间,有可能会影响节点的写入和查询性能,甚至在极端情况下导致短时间的阻写。
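End to end, this looks roughly as follows; a sketch assuming a database named demo, with the real vgroup ids read from SHOW VGROUPS before compacting:

```bash
# List vgroups (including the compacting column), then defragment two of them.
taos -s "USE demo; SHOW VGROUPS;"
taos -s "USE demo; COMPACT VNODES IN (1, 2);"
```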
## <a class="anchor" id="directories"></a>文件目录结构
安装TDengine后,默认会在操作系统中生成下列目录或文件:
......@@ -444,7 +462,7 @@ All TDengine executables are stored under the _/usr/local/taos/bin_ directory by default
- Database name: must not contain "." or special characters, and must not exceed 32 characters
- Table name: must not contain "." or special characters, and, together with the database name it belongs to, must not exceed 192 characters
- Table column name: must not contain special characters, and must not exceed 64 characters
-- Database names, table names, and column names must not begin with a digit
+- Database names, table names, and column names must not begin with a digit; the legal character set is English letters, digits, and underscore
- Number of columns in a table: must not exceed 1024
- Maximum record length: including the 8-byte timestamp, must not exceed 16 KB (each BINARY/NCHAR column additionally occupies 2 bytes of storage)
- Default maximum length of a single SQL statement: 65480 bytes
TDengine currently has nearly 200 internal reserved keywords; regardless of case, none of them may be used as a database name, table name, STable name, data column name, or tag column name. The list is as follows:
| Keyword List | | | | |
| ---------- | ----------- | ------------ | ---------- | --------- |
| ABLOCKS | CONNECTIONS | HAVING | MODULES | SMALLINT |
| ABORT | COPY | ID | NCHAR | SPREAD |
| ACCOUNT | COUNT | IF | NE | STABLE |
| ACCOUNTS | CREATE | IGNORE | NONE | STABLES |
| ADD | CTIME | IMMEDIATE | NOT | STAR |
| AFTER | DATABASE | IMPORT | NOTNULL | STATEMENT |
| ALL | DATABASES | IN | NOW | STDDEV |
| ALTER | DAYS | INITIALLY | OF | STREAM |
| AND | DEFERRED | INSERT | OFFSET | STREAMS |
| AS | DELIMITERS | INSTEAD | OR | STRING |
| ASC | DESC | INTEGER | ORDER | SUM |
| ATTACH | DESCRIBE | INTERVAL | PASS | TABLE |
| AVG | DETACH | INTO | PERCENTILE | TABLES |
| BEFORE | DIFF | IP | PLUS | TAG |
| BEGIN | DISTINCT | IS | PRAGMA | TAGS |
| BETWEEN | DIVIDE | ISNULL | PREV | TBLOCKS |
| BIGINT | DNODE | JOIN | PRIVILEGE | TBNAME |
| BINARY | DNODES | KEEP | QUERIES | TIMES |
| BITAND | DOT | KEY | QUERY | TIMESTAMP |
| BITNOT | DOUBLE | KILL | RAISE | TINYINT |
| BITOR | DROP | LAST | REM | TOP |
| BOOL | EACH | LE | REPLACE | TOPIC |
| BOTTOM | END | LEASTSQUARES | REPLICA | TRIGGER |
| BY | EQ | LIKE | RESET | UMINUS |
| CACHE | EXISTS | LIMIT | RESTRICT | UNION |
| CASCADE | EXPLAIN | LINEAR | ROW | UPLUS |
| CHANGE | FAIL | LOCAL | ROWS | USE |
| CLOG | FILL | LP | RP | USER |
| CLUSTER | FIRST | LSHIFT | RSHIFT | USERS |
| COLON | FLOAT | LT | SCORES | USING |
| COLUMN | FOR | MATCH | SELECT | VALUES |
| COMMA | FROM | MAX | SEMI | VARIABLE |
| COMP | GE | METRIC | SET | VGROUPS |
| CONCAT | GLOB | METRICS | SHOW | VIEW |
| CONFIGS | GRANTS | MIN | SLASH | WAVG |
| CONFLICT | GROUP | MINUS | SLIDING | WHERE |
| CONNECTION | GT | MNODES | SLIMIT | |
| Keyword List | | | | |
| ------------ | ------------ | ------------ | ------------ | ------------ |
| ABORT | CREATE | IGNORE | NULL | STAR |
| ACCOUNT | CTIME | IMMEDIATE | OF | STATE |
| ACCOUNTS | DATABASE | IMPORT | OFFSET | STATEMENT |
| ADD | DATABASES | IN | OR | STATE_WINDOW |
| AFTER | DAYS | INITIALLY | ORDER | STORAGE |
| ALL | DBS | INSERT | PARTITIONS | STREAM |
| ALTER | DEFERRED | INSTEAD | PASS | STREAMS |
| AND | DELIMITERS | INT | PLUS | STRING |
| AS | DESC | INTEGER | PPS | SYNCDB |
| ASC | DESCRIBE | INTERVAL | PRECISION | TABLE |
| ATTACH | DETACH | INTO | PREV | TABLES |
| BEFORE | DISTINCT | IS | PRIVILEGE | TAG |
| BEGIN | DIVIDE | ISNULL | QTIME | TAGS |
| BETWEEN | DNODE | JOIN | QUERIES | TBNAME |
| BIGINT | DNODES | KEEP | QUERY | TIMES |
| BINARY | DOT | KEY | QUORUM | TIMESTAMP |
| BITAND | DOUBLE | KILL | RAISE | TINYINT |
| BITNOT | DROP | LE | REM | TOPIC |
| BITOR | EACH | LIKE | REPLACE | TOPICS |
| BLOCKS | END | LIMIT | REPLICA | TRIGGER |
| BOOL | EQ | LINEAR | RESET | TSERIES |
| BY | EXISTS | LOCAL | RESTRICT | UMINUS |
| CACHE | EXPLAIN | LP | ROW | UNION |
| CACHELAST | FAIL | LSHIFT | RP | UNSIGNED |
| CASCADE | FILE | LT | RSHIFT | UPDATE |
| CHANGE | FILL | MATCH | SCORES | UPLUS |
| CLUSTER | FLOAT | MAXROWS | SELECT | USE |
| COLON | FOR | MINROWS | SEMI | USER |
| COLUMN | FROM | MINUS | SESSION | USERS |
| COMMA | FSYNC | MNODES | SET | USING |
| COMP | GE | MODIFY | SHOW | VALUES |
| COMPACT | GLOB | MODULES | SLASH | VARIABLE |
| CONCAT | GRANTS | NCHAR | SLIDING | VARIABLES |
| CONFLICT | GROUP | NE | SLIMIT | VGROUPS |
| CONNECTION | GT | NONE | SMALLINT | VIEW |
| CONNECTIONS | HAVING | NOT | SOFFSET | VNODES |
| CONNS | ID | NOTNULL | STABLE | WAL |
| COPY | IF | NOW | STABLES | WHERE |
......@@ -87,6 +87,7 @@
TDengine does not yet have a dedicated set of validation queries, but we recommend using the system-monitoring database "log" for this purpose.
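For instance, a sketch of such a probe (the tables present inside log vary with the server version):

```bash
# Touch the system-monitoring database as a lightweight validation query.
taos -s "USE log; SHOW TABLES;"
```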
<a class="anchor" id="update"></a>
## 9. 我可以删除或更新一条记录吗?
TDengine 目前尚不支持删除功能,未来根据用户需求可能会支持。
......
......@@ -16,6 +16,7 @@ TDengine is a highly efficient platform to store, query, and analyze time-series
- [Command-line](/getting-started#console) : an easy way to access TDengine server
- [Experience Lightning Speed](/getting-started#demo): running a demo, inserting/querying data to experience faster speed
- [List of Supported Platforms](/getting-started#platforms): a list of platforms supported by TDengine server and client
+- [Deploy to Kubernetes](https://taosdata.github.io/TDengine-Operator/en/index.html): a detailed guide for TDengine deployment in a Kubernetes environment
## [Overall Architecture](/architecture)
......@@ -28,7 +29,7 @@ TDengine is a highly efficient platform to store, query, and analyze time-series
## [Data Modeling](/model)
- [Create a Library](/model#create-db): create a library for all data collection points with similar features
- [Create a Database](/model#create-db): create a database for all data collection points with similar features
- [Create a Super Table(STable)](/model#create-stable): create a STable for all data collection points with the same type
- [Create a Table](/model#create-table): use STable as the template, to create a table for each data collecting point
......@@ -70,7 +71,7 @@ TDengine is a highly efficient platform to store, query, and analyze time-series
## [Connector](/connector)
- [C/C++ Connector](/connector#c-cpp): primary method to connect to TDengine server through libtaos client library
-- [Java Connector(JDBC)](/connector/java): driver for connecting to the server from Java applications using the JDBC API
+- [Java Connector(JDBC)]: driver for connecting to the server from Java applications using the JDBC API
- [Python Connector](/connector#python): driver for connecting to TDengine server from Python applications
- [RESTful Connector](/connector#restful): a simple way to interact with TDengine via HTTP
- [Go Connector](/connector#go): driver for connecting to TDengine server from Go applications
......@@ -81,7 +82,7 @@ TDengine is a highly efficient platform to store, query, and analyze time-series
## [Connections with Other Tools](/connections)
- [Grafana](/connections#grafana): query the data saved in TDengine and provide visualization
-- [Matlab](/connections#matlab): access data stored in TDengine server via JDBC configured within Matlab
+- [MATLAB](/connections#matlab): access data stored in TDengine server via JDBC configured within MATLAB
- [R](/connections#r): access data stored in TDengine server via JDBC configured within R
- [IDEA Database](https://www.taosdata.com/blog/2020/08/27/1767.html): use TDengine visually through IDEA Database Management Tool
......@@ -111,8 +112,8 @@ TDengine is a highly efficient platform to store, query, and analyze time-series
## TDengine Technical Design
-- [System Module](/architecture/taosd): taosd functions and modules partitioning
-- [Data Replication](/architecture/replica): support real-time synchronous/asynchronous replication, to ensure high-availability of the system
+- [System Module]: taosd functions and modules partitioning
+- [Data Replication]: support real-time synchronous/asynchronous replication, to ensure high-availability of the system
- [Technical Blog](https://www.taosdata.com/cn/blog/?categories=3): More technical analysis and architecture design articles
## Common Tools
......@@ -121,7 +122,7 @@ TDengine is a highly efficient platform to store, query, and analyze time-series
- [TDengine performance comparison test tools](https://www.taosdata.com/blog/2020/01/18/1166.html)
- [Use TDengine visually through IDEA Database Management Tool](https://www.taosdata.com/blog/2020/08/27/1767.html)
-## [Performance: TDengine vs Others
+## Performance: TDengine vs Others
- [Performance: TDengine vs InfluxDB with InfluxDB’s open-source performance testing tool](https://www.taosdata.com/blog/2020/01/13/1105.html)
- [Performance: TDengine vs OpenTSDB](https://www.taosdata.com/blog/2019/08/21/621.html)
......@@ -138,4 +139,4 @@ TDengine is a highly efficient platform to store, query, and analyze time-series
## FAQ
-- [FAQ: Common questions and answers](/faq)
\ No newline at end of file
+- [FAQ: Common questions and answers](/faq)
......@@ -9,8 +9,8 @@ One of the modules of TDengine is the time-series database. However, in addition
- **Performance improvement over 10 times**: An innovative data storage structure is defined, with which a single core can process at least 20,000 requests per second, insert millions of data points, and read more than 10 million data points, more than 10 times faster than other existing general databases.
- **Reduce the cost of hardware or cloud services to 1/5**: Due to its ultra-performance, TDengine’s computing resources consumption is less than 1/5 of other common Big Data solutions; through columnar storage and advanced compression algorithms, the storage consumption is less than 1/10 of other general databases.
- **Full-stack time-series data processing engine**: Integrate database, message queue, cache, stream computing, and other functions, and the applications do not need to integrate with software such as Kafka/Redis/HBase/Spark/HDFS, thus greatly reducing the complexity cost of application development and maintenance.
-- **Powerful analysis functions**: Data from ten years ago or one second ago, can all be queried based on a specified time range. Data can be aggregated on a timeline or multiple devices. Ad-hoc queries can be made at any time through Shell, Python, R, and Matlab.
-- **Seamless connection with third-party tools**: Integration with Telegraf, Grafana, EMQ, HiveMQ, Prometheus, Matlab, R, etc. without even one single line of code. OPC, Hadoop, Spark, etc. will be supported in the future, and more BI tools will be seamlessly connected to.
+- **Powerful analysis functions**: Data from ten years ago or one second ago, can all be queried based on a specified time range. Data can be aggregated on a timeline or multiple devices. Ad-hoc queries can be made at any time through Shell, Python, R, and MATLAB.
+- **Seamless connection with third-party tools**: Integration with Telegraf, Grafana, EMQ, HiveMQ, Prometheus, MATLAB, R, etc. without even one single line of code. OPC, Hadoop, Spark, etc. will be supported in the future, and more BI tools will be seamlessly connected to.
- **Zero operation cost & zero learning cost**: Installing clusters is simple and quick, with real-time backup built-in, and no need to split libraries or tables. Similar to standard SQL, TDengine can support RESTful, Python/Java/C/C + +/C#/Go/Node.js, and similar to MySQL with zero learning cost.
With TDengine, the total cost of ownership of typical IoT, Internet of Vehicles, and Industrial Internet Big Data platforms can be greatly reduced. However, it should be pointed out that due to making full use of the characteristics of IoT time-series data, TDengine cannot be used to process general data from web crawlers, microblogs, WeChat, e-commerce, ERP, CRM, and other sources.
......@@ -43,26 +43,23 @@ From the perspective of data sources, designers can analyze the applicability of
### System Function Requirements
-| | | | | |
-| ----------------------------------------------------- | ------------------ | -------------------- | ------------------- | ------------------------------------------------------------ |
-| **System Function Requirements** | **Not Applicable** | **Might Applicable** | **Very Applicable** | **Description** |
+| **System Function Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
| Require completed data processing algorithms built-in | | √ | | TDengine implements various general data processing algorithms, but cannot cover every industry-specific requirement, so special types of processing must still be handled at the application level. |
| Require a huge amount of crosstab queries | | √ | | This type of processing is better handled by a relational database system, or TDengine and a relational database system can work together to implement the system's functions. |
### System Performance Requirements
-| | | | | |
-| -------------------------------------------- | ------------------ | -------------------- | ------------------- | ------------------------------------------------------------ |
-| **System Performance Requirements** | **Not Applicable** | **Might Applicable** | **Very Applicable** | **Description** |
+| **System Performance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
| Require larger total processing capacity | | | √ | TDengine’s cluster functions can easily improve processing capacity via multi-server-cooperating. |
| Require high-speed data processing | | | √ | TDengine's storage and data processing are designed and optimized for IoT, and generally process data several times faster than other similar products. |
| Require fast processing of fine-grained data | | | √ | TDengine has achieved the same level of performance with relational and NoSQL data processing systems. |
### System Maintenance Requirements
-| | | | | |
-| -------------------------------------------- | ------------------ | -------------------- | ------------------- | ------------------------------------------------------------ |
-| **System Maintenance Requirements** | **Not Applicable** | **Might Applicable** | **Very Applicable** | **Description** |
+| **System Maintenance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
| Require system with high-reliability | | | √ | TDengine has a very robust and reliable system architecture to implement simple and convenient daily operation with streamlined experiences for operators, thus human errors and accidents are eliminated to the greatest extent. |
| Require controllable operation learning cost | | | √ | As above. |
-| Require abundant talent supply | √ | | | As a new-generation product, it’s still difficult to find talents with TDengine experiences from market. However, the learning cost is low. As the vendor, we also provide extensive operation training and counselling services. |
\ No newline at end of file
+| Require abundant talent supply | √ | | | As a new-generation product, it’s still difficult to find talent with TDengine experience in the market. However, the learning cost is low, and as the vendor we also provide extensive operation training and counselling services. |
......@@ -2,7 +2,7 @@
## <a class="anchor" id="install"></a>Quick Install
-TDegnine software consists of 3 parts: server, client, and alarm module. At the moment, TDengine server only runs on Linux (Windows, mac OS and more OS supports will come soon), but client can run on either Windows or Linux. TDengine client can be installed and run on Windows or Linux. Applications based-on any OSes can all connect to server taosd via a RESTful interface. About CPU, TDegnine supports X64/ARM64/MIPS64/Alpha64, and ARM32、RISC-V, other more CPU architectures will be supported soon. You can set up and install TDengine server either from the [source code](https://www.taosdata.com/en/getting-started/#Install-from-Source) or the [packages](https://www.taosdata.com/en/getting-started/#Install-from-Package).
+TDengine software consists of 3 parts: server, client, and alarm module. At the moment, the TDengine server only runs on Linux (Windows, macOS, and more OS support will come soon), but the client can be installed and run on either Windows or Linux. Applications based on any OS can connect to the server taosd via the RESTful interface. As for CPUs, TDengine supports X64/ARM64/MIPS64/Alpha64 as well as ARM32 and RISC-V; more CPU architectures will be supported soon. You can set up and install the TDengine server either from the [source code](https://www.taosdata.com/en/getting-started/#Install-from-Source) or the [packages](https://www.taosdata.com/en/getting-started/#Install-from-Package).
### <a class="anchor" id="source-install"></a>Install from Source
......@@ -14,7 +14,7 @@ Please visit our [TDengine Official Docker Image: Distribution, Downloading, and
### <a class="anchor" id="package-install"></a>Install from Package
-It’s extremely easy to install for TDegnine, which takes only a few seconds from downloaded to successful installed. The server installation package includes clients and connectors. We provide 3 installation packages, which you can choose according to actual needs:
+It’s extremely easy to install TDengine: it takes only a few seconds from download to successful installation. The server installation package includes clients and connectors. We provide 3 installation packages, which you can choose from according to actual needs:
Click [here](https://www.taosdata.com/cn/getting-started/#%E9%80%9A%E8%BF%87%E5%AE%89%E8%A3%85%E5%8C%85%E5%AE%89%E8%A3%85) to download the install package.
......@@ -25,21 +25,13 @@ For more about installation process, please refer [TDengine Installation Package
After installation, you can start the TDengine service by the `systemctl` command.
```bash
$ systemctl start taosd
```
Then check if the service is working now.
```bash
$ systemctl status taosd
```
If the service is running successfully, you can play around through TDengine shell `taos`.
......@@ -47,16 +39,14 @@ If the service is running successfully, you can play around through TDengine she
**Note:**
- The `systemctl` command needs the **root** privilege. Use **sudo** if you are not the **root** user.
-- To get better product feedback and improve our solution, TDegnine will collect basic usage information, but you can modify the configuration parameter **telemetryReporting** in the system configuration file taos.cfg, and set it to 0 to turn it off.
-- TDegnine uses FQDN (usually hostname) as the node ID. In order to ensure normal operation, you need to set hostname for the server running taosd, and configure DNS service or hosts file for the machine running client application, to ensure the FQDN can be resolved.
+- To get better product feedback and improve our solution, TDengine will collect basic usage information, but you can modify the configuration parameter **telemetryReporting** in the system configuration file taos.cfg, and set it to 0 to turn it off.
+- TDengine uses FQDN (usually hostname) as the node ID. In order to ensure normal operation, you need to set hostname for the server running taosd, and configure DNS service or hosts file for the machine running client application, to ensure the FQDN can be resolved.
- TDengine supports installation on Linux systems with [systemd](https://en.wikipedia.org/wiki/Systemd) as the process service manager, and uses the `which systemctl` command to detect whether `systemd` packages exist in the system:
  ```bash
  $ which systemctl
  ```
If `systemd` is not supported in the system, TDengine service can also be launched via `/usr/local/taos/bin/taosd` manually.
## <a class="anchor" id="console"></a>TDengine Shell Command Line
......@@ -64,28 +54,18 @@ If `systemd` is not supported in the system, TDengine service can also be launch
To launch TDengine shell, the command line interface, in a Linux terminal, type:
```bash
$ taos
```
The welcome message is printed if the shell connects to TDengine server successfully, otherwise, an error message will be printed (refer to our [FAQ](https://www.taosdata.com/en/faq) page for troubleshooting the connection error). The TDengine shell prompt is:
```cmd
taos>
```
In the TDengine shell, you can create databases, create tables and insert/query data with SQL. Each query command ends with a semicolon. It works like MySQL, for example:
```mysql
create database demo;
use demo;
......@@ -107,8 +87,6 @@ ts | speed |
19-07-15 01:00:00.000| 20|
Query OK, 2 row(s) in set (0.001700s)
```
Besides SQL commands, the system administrator can check the system status, add or delete user accounts, and manage the servers.
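For example, a sketch of two such administrative commands (the user name and password are placeholders):

```bash
# Inspect current client connections, then create an additional user account.
taos -s "SHOW CONNECTIONS;"
taos -s "CREATE USER ops PASS 'ops_password';"
```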
......@@ -127,11 +105,7 @@ You can configure command parameters to change how TDengine shell executes. Some
Examples:
```bash
$ taos -h 192.168.0.1 -s "use db; show tables;"
```
### Run SQL Command Scripts
......@@ -139,11 +113,7 @@ $ taos -h 192.168.0.1 -s "use db; show tables;"
Inside TDengine shell, you can run SQL scripts in a file with source command.
```mysql
taos> source <filename>;
```
### Shell Tips
......@@ -158,11 +128,7 @@ taos> source <filename>;
After starting the TDengine server, you can execute the command `taosdemo` in the Linux terminal.
```bash
$ taosdemo
```
Using this command, a STable named `meters` will be created in the database `test`. There are 10k tables under this STable, named from `t0` to `t9999`. In each table there are 100k rows of records, each row with columns (`f1`, `f2` and `f3`). The timestamp ranges from "2017-07-14 10:40:00 000" to "2017-07-14 10:41:39 999". Each table also has tags `areaid` and `loc`: `areaid` is set from 1 to 10, and `loc` is set to "beijing" or "shanghai".
......@@ -174,51 +140,31 @@ In the TDengine client, enter sql query commands and then experience our lightni
- query total rows of records:
```mysql
taos> select count(*) from test.meters;
```
- query average, max and min of the total 1 billion records:
```mysql
taos> select avg(f1), max(f2), min(f3) from test.meters;
```
- query the number of records where loc="beijing":
```mysql
taos> select count(*) from test.meters where loc="beijing";
```
- query the average, max and min of total records where areaid=10:
```mysql
taos> select avg(f1), max(f2), min(f3) from test.meters where areaid=10;
```
- query the average, max, min from table t10 when aggregating over every 10s:
```mysql
taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s);
```
**Note**: you can run the command `taosdemo` with many options, such as the number of tables and the number of rows per table. To learn more about these options, execute `taosdemo --help` and experiment with different settings, as sketched below.
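For instance, a sketch only, since option names differ across taosdemo versions (check `--help` first; the flags below are assumptions):

```bash
# Show all supported options.
taosdemo --help
# Hypothetical: a smaller run with 1000 tables and 10000 rows per table.
taosdemo -t 1000 -n 10000
```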
......@@ -274,4 +220,4 @@ Comparison matrix as following:
Note: ● has been verified by official tests; ○ has been verified by unofficial tests.
-Please visit [Connectors](https://www.taosdata.com/cn/documentation/connector) section for more detailed information.
\ No newline at end of file
+Please visit the [Connectors](https://www.taosdata.com/en/documentation/connector) section for more detailed information.
This diff is collapsed.
This diff is collapsed.
# Efficient Data Writing
TDengine supports multiple interfaces to write data, including SQL, Prometheus, Telegraf, EMQ MQTT Broker, HiveMQ Broker, CSV files, and more; Kafka, OPC, and other interfaces will be provided in the future. Data can be inserted one record at a time or in batches, from a single data collection point or from many at once. TDengine supports multi-threaded insertion, out-of-order data insertion, and insertion of historical data.
## <a class="anchor" id="sql"></a> SQL Writing
Applications insert data by executing SQL insert statements through the C/C++, JDBC, GO, or Python connectors, and users can manually enter SQL insert statements through the TAOS Shell. For example, the following insert writes a record to table d1001:
```mysql
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31);
```
TDengine supports writing multiple records at a time. For example, the following command writes two records to table d1001:
```mysql
INSERT INTO d1001 VALUES (1538548684000, 10.2, 220, 0.23) (1538548696650, 10.3, 218, 0.25);
```
TDengine also supports writing data to multiple tables at a time. For example, the following command writes two records to d1001 and one record to d1002:
```mysql
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) d1002 VALUES (1538548696800, 12.3, 221, 0.31);
```
For the SQL INSERT Grammar, please refer to [Taos SQL insert](https://www.taosdata.com/en/documentation/taos-sql#insert)
**Tips:**
- To improve writing efficiency, write in batches: the more records written in a batch, the higher the insertion efficiency. However, a record cannot exceed 16K, and the total length of an SQL statement cannot exceed 64K (configurable via the parameter maxSQLLength, up to a maximum of 1M; see the sketch after this list).
- TDengine supports multi-threaded parallel writing. To further improve write speed, a client should use more than 20 threads writing in parallel. Beyond a certain threshold, however, adding more threads no longer helps and can even hurt, because overly frequent thread switching brings extra overhead.
- For the same table, if the timestamp of a newly inserted record already exists and the database was not created with UPDATE 1, the new record is discarded by default; that is, the timestamp must be unique within a table. If an application generates records automatically, it is quite likely to generate identical timestamps, so the number of records successfully inserted may be smaller than the number the application tried to insert. If the database was created with the UPDATE 1 option, inserting a new record with an existing timestamp overwrites the original record.
- The timestamp of written data must be no older than the current time minus the configuration parameter keep: if keep is configured as 3650 days, data older than 3650 days cannot be written. Nor may the timestamp be more than the configuration parameter days beyond the current time: if days is configured as 2, data more than 2 days in the future cannot be written.
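Following up on the maxSQLLength tip above, a sketch assuming the default client configuration file location:

```bash
# Raise the maximum SQL statement length to 1 MB in taos.cfg (client side);
# restart the client application for the change to take effect.
echo "maxSQLLength 1048576" | sudo tee -a /etc/taos/taos.cfg
```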
## <a class="anchor" id="prometheus"></a> Direct Writing of Prometheus
As a graduated project of the Cloud Native Computing Foundation, [Prometheus](https://www.prometheus.io/) is widely used for performance monitoring, including K8S performance monitoring. TDengine provides a simple tool, [Bailongma](https://github.com/taosdata/Bailongma), which requires only a little configuration in Prometheus, and no code, to write the data collected by Prometheus directly into TDengine and automatically create the databases and tables in TDengine according to rules. The blog post [Use Docker Container to Quickly Build a Devops Monitoring Demo](https://www.taosdata.com/blog/2020/02/03/1189.html) gives an example of using Bailongma to write Prometheus and Telegraf data into TDengine.
### Compile blm_prometheus From Source
Users need to download the source code of [Bailongma](https://github.com/taosdata/Bailongma) from GitHub, then compile it into an executable with the Go compiler. Before you start compiling, complete the following preparations:
- A server running Linux OS
- Go version 1.10 or higher installed
- An appropriate TDengine version. Because the TDengine client dynamic link library is used, the same version of TDengine as on the server side must be installed; for example, if the server runs TDengine 2.0.0, install the same version on the Linux server where Bailongma is located (it can be the same server as TDengine, or a different one)
The Bailongma project has a folder, blm_prometheus, which holds the Prometheus writing API. The compiling process is as follows:
```bash
cd blm_prometheus
go build
```
If everything goes well, an executable of blm_prometheus will be generated in the corresponding directory.
### Install Prometheus
Download and install Prometheus following the instructions on its official website. [Download address](https://prometheus.io/download/)
### Configure Prometheus
Read the Prometheus [configuration document](https://prometheus.io/docs/prometheus/latest/configuration/configuration/) and add the following to the remote_write section of the Prometheus configuration file:
- url: the URL provided by the Bailongma API service; refer to the blm_prometheus startup example section below
After Prometheus is launched, you can check whether the data is being written successfully by querying through the taos client.
### Launch blm_prometheus
blm_prometheus provides the following options, which you can configure when launching it:
```sh
--tdengine-name
If TDengine is installed on a server with a domain name, you can access TDengine by configuring that domain name. In a K8S environment, it can be set to the service name under which TDengine runs.
--batch-size
blm_prometheus assembles the received Prometheus data into TDengine write requests. This parameter controls how many data points are carried in a single write request sent to TDengine.
--dbname
The name of the database created in TDengine; blm_prometheus automatically creates a database named dbname in TDengine. The default value is prometheus.
--dbuser
The user name used to access TDengine; the default value is 'root'.
--dbpassword
The password used to access TDengine; the default value is 'taosdata'.
--port
The port on which blm_prometheus serves Prometheus.
```
### Example
Launch an API service for blm_prometheus with the following command:
```bash
./blm_prometheus -port 8088
```
Assuming the IP address of the server where blm_prometheus is located is "10.1.2.3", add the URL to the remote_write section of the Prometheus configuration file as:
remote_write:
  - url: "http://10.1.2.3:8088/receive"
### Query written data of prometheus
The data generated by Prometheus has the following format:
```json
{
Timestamp: 1576466279341,
Value: 37.000000,
apiserver_request_latencies_bucket {
component="apiserver",
instance="192.168.99.116:8443",
job="kubernetes-apiservers",
le="125000",
resource="persistentvolumes", s
cope="cluster",
verb="LIST",
version="v1"
}
}
```
Here apiserver_request_latencies_bucket is the name of the time series collected by Prometheus, and the tags of the time series appear in the braces that follow. blm_prometheus automatically creates a STable in TDengine named after the time series, converts the tags in the braces into TDengine tag values, and uses Timestamp as the timestamp and Value as the value of the time series. In the TDengine client, you can check whether this data was written successfully with the following statements:
```mysql
use prometheus;
select * from apiserver_request_latencies_bucket;
```
## <a class="anchor" id="telegraf"></a> Direct Writing of Telegraf
[Telegraf](https://www.influxdata.com/time-series-platform/telegraf/) is a popular open-source tool for collecting IT operations data. TDengine provides a simple tool, [Bailongma](https://github.com/taosdata/Bailongma), which requires only a little configuration in Telegraf, and no code, to write the data collected by Telegraf directly into TDengine and automatically create the databases and tables in TDengine according to rules. The blog post [Use Docker Container to Quickly Build a Devops Monitoring Demo](https://www.taosdata.com/blog/2020/02/03/1189.html) gives an example of using Bailongma to write Prometheus and Telegraf data into TDengine.
### Compile blm_telegraf From Source Code
Users need to download the source code of [Bailongma](https://github.com/taosdata/Bailongma) from GitHub, then compile it into an executable with the Go compiler. Before you start compiling, complete the following preparations:
- A server running Linux OS
- Go version 1.10 or higher installed
- An appropriate TDengine version. Because the TDengine client dynamic link library is used, the same version of TDengine as on the server side must be installed; for example, if the server runs TDengine 2.0.0, install the same version on the Linux server where Bailongma is located (it can be the same server as TDengine, or a different one)
The Bailongma project has a folder, blm_telegraf, which holds the Telegraf writing API. The compiling process is as follows:
```bash
cd blm_telegraf
go build
```
If everything goes well, an executable of blm_telegraf will be generated in the corresponding directory.
### Install Telegraf
At the moment, TDengine supports Telegraf version 1.7.4 and above. Users can download the installation package for their operating system from Telegraf's website: https://portal.influxdata.com/downloads
### Configure Telegraf
Modify the TDengine-related configurations in the Telegraf configuration file /etc/telegraf/telegraf.conf.
In the output plugins section, add the [[outputs.http]] configuration:
- url: the URL provided by the Bailongma API service; please refer to the example section below
- data_format: "json"
- json_timestamp_units: "1ms"
In agent section:
- hostname: The machine name that distinguishes different collection devices, and it is necessary to ensure its uniqueness
- metric_batch_size: 100, the maximum number of records per batch that Telegraf is allowed to write. Increasing it reduces how often Telegraf sends requests.
For information on how to use Telegraf to collect data and more about using Telegraf, please refer to the official [document](https://docs.influxdata.com/telegraf/v1.11/) of Telegraf.
### Launch blm_telegraf
blm_telegraf provides the following options, which can be set at launch to tune its configuration:
```sh
--host
The IP address of the TDengine server; the default is null.
--batch-size
blm_telegraf assembles the received Telegraf data into TDengine write requests. This parameter controls how many data points are carried in a single write request sent to TDengine.
--dbname
The name of the database created in TDengine; blm_telegraf automatically creates a database named dbname in TDengine. The default value is prometheus.
--dbuser
The user name used to access TDengine; the default value is 'root'.
--dbpassword
The password used to access TDengine; the default value is 'taosdata'.
--port
The port on which blm_telegraf serves Telegraf.
```
### Example
Launch an API service for blm_telegraf with the following command:
```bash
./blm_telegraf -host 127.0.0.1 -port 8089
```
Assuming the IP address of the server where blm_telegraf is located is "10.1.2.3", add the URL to the Telegraf configuration file as:
```yaml
url = "http://10.1.2.3:8089/telegraf"
```
### Query written data of telegraf
The data generated by Telegraf has the following format:
```json
{
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 89.7897897897898,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 5.405405405405405,
"usage_user": 4.804804804804805
},
"name": "cpu",
"tags": {
"cpu": "cpu2",
"host": "bogon"
},
"timestamp": 1576464360
}
```
Here the name field is the name of the time series collected by Telegraf, and the tag field holds the tags of the time series. blm_telegraf automatically creates a STable in TDengine named after the time series, converts the tag field into TDengine tag values, and uses timestamp as the timestamp and the fields values as the values of the time series. In the TDengine client, you can check whether this data was written successfully with the following statements:
```mysql
use telegraf;
select * from cpu;
```
MQTT is a popular data transmission protocol in the IoT. TDengine can easily access the data received by an MQTT broker and write it to TDengine.
## <a class="anchor" id="emq"></a> Direct Writing of EMQ Broker
[EMQ](https://github.com/emqx/emqx) is an open-source MQTT broker; without writing any code, a simple configuration of "rules" in the EMQ Dashboard is enough to write MQTT data directly into TDengine. EMQ X supports storing data to TDengine by sending it to a web service, and the Enterprise Edition also provides a native TDengine driver for direct storage. Please refer to the [EMQ official documents](https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine) for details.
## <a class="anchor" id="hivemq"></a> Direct Writing of HiveMQ Broker
[HiveMQ](https://www.hivemq.com/) is an MQTT broker that provides Free Personal and Enterprise Edition versions. It is mainly used in enterprise and emerging machine-to-machine (M2M) communication and internal transport scenarios that require scalability, manageability, and security. HiveMQ provides an open-source plugin development kit. You can store data to TDengine via the HiveMQ extension-TDengine. Refer to the [HiveMQ extension-TDengine documentation](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README.md) for more details.
# Efficient Data Querying
## <a class="anchor" id="queries"></a> Main Query Features
TDengine uses SQL as its query language. Applications can send SQL statements through the C/C++, Java, Go, and Python connectors, and users can run ad-hoc SQL queries manually through TAOS Shell, the Command Line Interface (CLI) tool provided by TDengine. TDengine supports the following query functions:
- Single-column and multi-column data query
- Multiple filters on tags and numeric values: >, <, =, <>, like, etc.
- Group by, order by, limit/offset on aggregation results
- Arithmetic operations on numeric columns and aggregation results
- Timestamp-aligned join queries (implicit joins)
- Multiple aggregation/calculation functions: count, max, min, avg, sum, twa, stddev, leastsquares, top, bottom, first, last, percentile, apercentile, last_row, spread, diff, etc.
For example, in TAOS Shell, query the records with voltage > 215 from table d1001, sort them in descending order by timestamp, and output only two records:
```mysql
taos> select * from d1001 where voltage > 215 order by ts desc limit 2;
ts | current | voltage | phase |
======================================================================================
2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 |
2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 |
Query OK, 2 row(s) in set (0.001100s)
```
To meet the needs of IoT scenarios, TDengine supports several special functions, such as twa (time-weighted average), spread (difference between maximum and minimum), and last_row (last record). More IoT-specific functions will be added. TDengine also supports continuous queries.
For specific query syntax, please see the [Data Query section of TAOS SQL](https://www.taosdata.com/cn/documentation/taos-sql#select).
## <a class="anchor" id="aggregation"></a> Multi-table Aggregation Query
In IoT scenarios, there are often multiple data collection points of the same type. TDengine uses the concept of a STable to describe a type of data collection point, and ordinary tables to describe specific data collection points. Meanwhile, TDengine uses tags to describe the static attributes of data collection points; a given data collection point has specific tag values. By specifying tag filters, TDengine provides an efficient way to aggregate and query the sub-tables of a STable (the data collection points of a given type). Aggregation functions and most operations on ordinary tables also apply to STables, with exactly the same syntax.
**Example 1**: In TAOS Shell, look up the average voltage collected by all smart meters in Beijing, grouped by location:
```mysql
taos> SELECT AVG(voltage) FROM meters GROUP BY location;
avg(voltage) | location |
=============================================================
222.000000000 | Beijing.Haidian |
219.200000000 | Beijing.Chaoyang |
Query OK, 2 row(s) in set (0.002136s)
```
**Example 2**: In TAOS Shell, count the records and find the maximum current over the past 24 hours for all smart meters with groupId 2:
```mysql
taos> SELECT count(*), max(current) FROM meters where groupId = 2 and ts > now - 24h;
count(*) | max(current) |
==================================
5 | 13.4 |
Query OK, 1 row(s) in set (0.002136s)
```
TDengine only allows aggregation queries across tables belonging to the same STable; aggregation queries across different STables are not supported. The Data Query section of TAOS SQL indicates, for each query operation, whether STables are supported.
## <a class="anchor" id="sampling"></a> Down Sampling Query, Interpolation
In IoT scenarios, it is often necessary to aggregate collected data over time intervals through down sampling. TDengine provides the simple keyword INTERVAL, which makes queries over time windows extremely easy. For example, sum the current values collected by smart meter d1001 every 10 seconds:
```mysql
taos> SELECT sum(current) FROM d1001 INTERVAL(10s);
ts | sum(current) |
======================================================
2018-10-03 14:38:00.000 | 10.300000191 |
2018-10-03 14:38:10.000 | 24.900000572 |
Query OK, 2 row(s) in set (0.000883s)
```
Down sampling also applies to STables, for example, summing the current values collected every second by all smart meters in Beijing:
```mysql
taos> SELECT SUM(current) FROM meters where location like "Beijing%" INTERVAL(1s);
ts | sum(current) |
======================================================
2018-10-03 14:38:04.000 | 10.199999809 |
2018-10-03 14:38:05.000 | 32.900000572 |
2018-10-03 14:38:06.000 | 11.500000000 |
2018-10-03 14:38:15.000 | 12.600000381 |
2018-10-03 14:38:16.000 | 36.000000000 |
Query OK, 5 row(s) in set (0.001538s)
```
Down sampling also supports a time offset, for example, summing the current values collected by all smart meters every second, with each time window shifted forward by 500 milliseconds:
```mysql
taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a);
ts | sum(current) |
======================================================
2018-10-03 14:38:04.500 | 11.189999809 |
2018-10-03 14:38:05.500 | 31.900000572 |
2018-10-03 14:38:06.500 | 11.600000000 |
2018-10-03 14:38:15.500 | 12.300000381 |
2018-10-03 14:38:16.500 | 35.000000000 |
Query OK, 5 row(s) in set (0.001521s)
```
In IoT scenarios, it is hard to synchronize the timestamps of data collected at different points, yet many analysis algorithms (such as FFT) need data strictly aligned at equal time intervals. In many systems, users have to write their own programs for this, while TDengine's down sampling solves it easily. If an interval contains no collected data, TDengine also provides an interpolation function.
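A hedged sketch against the d1001 table used above (the time range is illustrative; FILL expects an explicit time range in the WHERE clause):
```mysql
SELECT AVG(current) FROM d1001
  WHERE ts >= '2018-10-03 14:38:00.000' AND ts <= '2018-10-03 14:38:20.000'
  INTERVAL(5s) FILL(LINEAR);
```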
For details of syntax rules, please refer to the [Time-dimension Aggregation section of TAOS SQL](https://www.taosdata.com/en/documentation/taos-sql#aggregation).
# Connections with Other Tools
## <a class="anchor" id="grafana"></a> Grafana
TDengine can be quickly integrated with [Grafana](https://www.grafana.com/), an open-source data visualization system, to build a data monitoring and alerting system. The whole process requires no coding, and the contents of TDengine tables can be shown visually on a dashboard.
### Install Grafana
TDengine currently supports Grafana 5.2.4 and above. Download and install the package for your operating system from the Grafana website:
https://grafana.com/grafana/download
### Configure Grafana
The TDengine Grafana plugin is in the /usr/local/taos/connector/grafanaplugin directory.
Taking CentOS 7.2 as an example, just copy the grafanaplugin directory to the /var/lib/grafana/plugins directory and restart Grafana.
```bash
sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine
```
### Use Grafana
#### Configure data source
You can log in to the Grafana server (username/password: admin/admin) at localhost:3000, and add data sources through `Configuration -> Data Sources` on the left panel, as shown in the following figure:
![img](page://images/connections/add_datasource1.jpg)
Click `Add data source` to enter the Add Data Source page, type TDengine in the search box, and select it, as shown in the following figure:
![img](page://images/connections/add_datasource2.jpg)
Enter the data source configuration page and modify the settings according to the default prompts:
![img](page://images/connections/add_datasource3.jpg)
- Host: the IP address of any server in the TDengine cluster and the port number of the TDengine RESTful interface (6041); default [http://localhost:6041](http://localhost:6041/)
- User: TDengine user name.
- Password: TDengine user password.
Click `Save & Test`. On success, the following prompt appears:
![img](page://images/connections/add_datasource4.jpg)
#### Create Dashboard
Go back to the main interface to create a dashboard, and click `Add Query` to enter the panel query page:
![img](page://images/connections/create_dashboard1.jpg)
As shown in the figure above, select the TDengine data source in Query and enter the SQL statement to run in the query box below. Details are as follows:
- INPUT SQL: enter the statement to query (the result set of the SQL statement should be two columns and multiple rows), for example: `select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)`, where `from`, `to` and `interval` are built-in variables of the TDengine plugin, representing the query range and time interval taken from the Grafana panel. Besides the built-in variables, custom template variables are also supported.
- ALIAS BY: set an alias for the current query.
- GENERATE SQL: click this button to automatically substitute the variables and generate the final statement to execute.
Following the default prompts, query the average system memory usage of the server where TDengine is deployed, at the specified interval, as follows:
![img](page://images/connections/create_dashboard2.jpg)
> Please refer to the Grafana [documentation](https://grafana.com/docs/) for how to build the corresponding monitoring interfaces and for more on using Grafana.
#### Import Dashboard
An importable dashboard, `tdengine-grafana.json`, is provided under the Grafana plugin directory /usr/local/taos/connector/grafana/tdengine/dashboard/.
Click the `Import` button on the left panel and upload the `tdengine-grafana.json` file:
![img](page://images/connections/import_dashboard1.jpg)
After the dashboard is imported, it looks as follows:
![img](page://images/connections/import_dashboard2.jpg)
## <a class="anchor" id="matlab"></a> MATLAB
MATLAB can pull data into its local workspace by connecting directly to TDengine via the JDBC driver provided in the installation package.
### JDBC Interface Adaptation of MATLAB
Several steps are required to adapt MATLAB to TDengine. Taking MATLAB 2017a on Windows 10 as an example:
- Copy the file JDBCDriver-1.0.0-dist.jar from the TDengine package to the directory ${matlab_root}\MATLAB\R2017a\java\jar\toolbox
- Copy the file taos.lib from the TDengine package to ${matlab_root_dir}\MATLAB\R2017a\lib\win64
- Add the .jar package just copied to the MATLAB classpath, by appending the line below to the end of the file ${matlab_root_dir}\MATLAB\R2017a\toolbox\local\classpath.txt
  ```
  $matlabroot/java/jar/toolbox/JDBCDriver-1.0.0-dist.jar
  ```
- Create a file called javalibrarypath.txt in the directory ${user_home}\AppData\Roaming\MathWorks\MATLAB\R2017a\, and add the taos.dll path in the file. For example, if the file taos.dll is in the directory C:\Windows\System32, then add the following line in javalibrarypath.txt:
  ```
  C:\Windows\System32
  ```
### Connect to TDengine in MATLAB to get data
After the above configuration succeeds, open MATLAB.
- Create a connection:
```matlab
conn = database('db', 'root', 'taosdata', 'com.taosdata.jdbc.TSDBDriver', 'jdbc:TSDB://127.0.0.1:0/')
```
- Make a query:
```matlab
sql0 = ['select * from tb']
data = select(conn, sql0);
```
- Insert a record:
```matlab
sql1 = ['insert into tb values (now, 1)']
exec(conn, sql1)
```
For more detailed examples, please refer to the file examples\Matlab\TDengineDemo.m in the package.
## <a class="anchor" id="r"></a> R
The R language supports connecting to the TDengine database through the JDBC interface. First, install the JDBC package for R: launch the R environment, then execute the following command to install the JDBC support library:
```R
install.packages('RJDBC', repos='http://cran.us.r-project.org')
```
Once installed, load the RJDBC package by executing the `library('RJDBC')` command.
Then load the TDengine JDBC driver:
```R
drv<-JDBC("com.taosdata.jdbc.TSDBDriver","JDBCDriver-2.0.0-dist.jar", identifier.quote="\"")
```
If succeed, no error message will display. Then use the following command to try a database connection:
```R
conn<-dbConnect(drv,"jdbc:TSDB://192.168.0.1:0/?user=root&password=taosdata","root","taosdata")
```
Please replace the IP address in the command above with the correct one. If no error message is shown, the connection is established successfully; otherwise, adjust the connection command according to the error prompt. TDengine supports the following functions in the *RJDBC* package:
- `dbWriteTable(conn, "test", iris, overwrite=FALSE, append=TRUE)`: write the data in the data frame iris to the table test on the TDengine server. The parameter overwrite must be FALSE and append must be TRUE, and the schema of the data frame iris must match the table test.
- `dbGetQuery(conn, "select count(*) from test")`: run a query command
- `dbSendUpdate(conn, "use db")`: execute any non-query SQL statement, e.g. `dbSendUpdate(conn, "use db")` to switch databases, or `dbSendUpdate(conn, "insert into t1 values (now, 99)")` to write data.
- `dbReadTable(conn, "test")`: read all the data in table test
- `dbDisconnect(conn)`: close a connection
- `dbRemoveTable(conn, "test")`: remove table test
The functions below are not supported currently:
- `dbExistsTable(conn, "test")`: check whether table test exists
- `dbListTables(conn)`: list all tables in the connection
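Putting the supported functions together, a minimal end-to-end sketch (the IP address, database, and table are hypothetical):
```R
library('RJDBC')
# Load the TDengine JDBC driver shipped with the installation package
drv <- JDBC("com.taosdata.jdbc.TSDBDriver", "JDBCDriver-2.0.0-dist.jar", identifier.quote="\"")
conn <- dbConnect(drv, "jdbc:TSDB://192.168.0.1:0/?user=root&password=taosdata", "root", "taosdata")
dbSendUpdate(conn, "create database if not exists db")   # hypothetical database
dbSendUpdate(conn, "use db")
dbSendUpdate(conn, "create table if not exists t1 (ts timestamp, val int)")
dbSendUpdate(conn, "insert into t1 values (now, 99)")
print(dbGetQuery(conn, "select count(*) from t1"))
dbDisconnect(conn)
```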
# FAQ
Tutorials & FAQ
## 0. How to report an issue?
If the contents of this FAQ cannot help you and you need technical support and assistance from the TDengine team, please package the contents of the following two directories:
1. /var/log/taos (if the default path has not been modified)
2. /etc/taos
Provide the necessary description of the problem, including the TDengine version, the platform environment, the operation that triggered the problem, the symptoms, and the approximate time, and submit an issue on [GitHub](https://github.com/taosdata/TDengine).
To ensure there is enough debug information, if the problem can be reproduced, please modify the /etc/taos/taos.cfg file, add the line "debugFlag 135" at the end (without the quotation marks), restart taosd, reproduce the problem, and then submit the issue. You can also temporarily set the log level of taosd with the following SQL statement:
```
alter dnode <dnode_id> debugFlag 135;
```
However, when the system is running normally, please set debugFlag to 131, otherwise a large amount of log information will be generated and the system efficiency will be reduced.
## 1. What should I pay attention to when upgrading TDengine from older versions to 2.0 and above? ☆☆☆
Version 2.0 is a complete refactoring of the previous version, and the configuration and data files are incompatible. Be sure to do the following before upgrading:
1. Delete the configuration file: execute `sudo rm -rf /etc/taos/taos.cfg`
2. Delete the log files: execute `sudo rm -rf /var/log/taos/`
3. After confirming that the data is no longer needed, delete the data files: execute `sudo rm -rf /var/lib/taos/`
4. Install the latest stable version of TDengine
5. If you need to migrate data or the data files are corrupted, please contact TAOS Data's technical support team for assistance
## 2. What can I do when encountering the error "Unable to establish connection" on Windows?
See the [technical blog](https://www.taosdata.com/blog/2019/12/03/jdbcdriver%E6%89%BE%E4%B8%8D%E5%88%B0%E5%8A%A8%E6%80%81%E9%93%BE%E6%8E%A5%E5%BA%93/) for this issue.
## 3. Why do I get "more dnodes are needed" when creating a table?
See the [technical blog](https://www.taosdata.com/blog/2019/12/03/%E5%88%9B%E5%BB%BA%E6%95%B0%E6%8D%AE%E8%A1%A8%E6%97%B6%E6%8F%90%E7%A4%BAmore-dnodes-are-needed/) for this issue.
## 4. How do I generate a core file when TDengine crashes?
See the [technical blog](https://www.taosdata.com/blog/2019/12/06/tdengine-crash%E6%97%B6%E7%94%9F%E6%88%90core%E6%96%87%E4%BB%B6%E7%9A%84%E6%96%B9%E6%B3%95/) for this issue.
## 5. What should I do if I encounter an error "Unable to establish connection"?
When the client encounters a connection failure, please follow these steps to check:
1. Check your network environment
   - Cloud server: check whether the cloud server's security group opens access to TCP/UDP ports 6030-6042
   - Local virtual machine: check whether the network can be pinged, and try to avoid using localhost as the hostname
   - Corporate server: in a NAT network environment, be sure to check whether the server can return messages to the client
2. Make sure the client and server version numbers are exactly the same; the open-source Community Edition and the Enterprise Edition cannot be mixed.
3. On the server, execute `systemctl status taosd` to check the running status of *taosd*. If it is not running, start *taosd*.
4. Verify that the correct server FQDN (Fully Qualified Domain Name, obtainable by executing the Linux command `hostname -f` on the server) is specified when the client connects. FQDN configuration reference: "[All about FQDN of TDengine](https://www.taosdata.com/blog/2020/09/11/1824.html)".
5. Ping the server FQDN. If there is no response, check your network, the DNS settings, or the system hosts file of the computer where the client is located.
6. Check the firewall settings (`ufw status` on Ubuntu, `firewall-cmd --list-port` on CentOS) to confirm that TCP/UDP ports 6030-6042 are open (see the sketch after this list).
7. For JDBC (similarly ODBC, Python, Go, etc.) connections on Linux, make sure libtaos.so is in the directory /usr/local/taos/driver, and that /usr/local/taos/driver is in the system library search path LD_LIBRARY_PATH.
8. For JDBC, ODBC, Python, Go, etc. connections on Windows, make sure C:\TDengine\driver\taos.dll is in the system library search path (it is recommended to place taos.dll in C:\Windows\System32).
9. If the connection issue still exists:
   - On Linux, use the command line tool nc to check whether TCP and UDP connections on the specified ports are open. Check a UDP port: `nc -vuz {hostIP} {port}`. Check the server-side TCP port: `nc -l {port}`. Check the TCP port from the client side: `nc {hostIP} {port}` (see the sketch after this list)
   - On Windows, use the PowerShell command `Test-NetConnection -ComputerName {fqdn} -Port {port}` to check whether the server-side port is reachable
10. You can also use the taos program's built-in network connectivity detection to verify whether the specified port connections between server and client are open (both TCP and UDP): [TDengine's Built-in Network Detection Tool Use Guide](https://www.taosdata.com/blog/2020/09/08/1816.html).
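For reference, a sketch of the Linux-side checks mentioned in steps 6 and 9 (host and port are placeholders):
```bash
# Step 6: confirm the firewall leaves TCP/UDP ports 6030-6042 open
sudo ufw status              # Ubuntu
firewall-cmd --list-port     # CentOS

# Step 9: probe a UDP port with nc
nc -vuz 192.168.0.1 6030
# Step 9: TCP check -- run on the server to listen ...
nc -l 6030
# ... and on the client to connect
nc 192.168.0.1 6030
```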
## 6. What to do if I encounter the error "Unexpected generic error in RPC" or "TDengine error: Unable to resolve FQDN"?
This error occurs because the client or a data node cannot resolve the FQDN (Fully Qualified Domain Name). For TAOS Shell or client applications, check the following:
1. Verify that the FQDN of the connected server is correct. FQDN configuration reference: "[All about FQDN of TDengine](https://www.taosdata.com/blog/2020/09/11/1824.html)".
2. If the network is configured with a DNS server, check that it is working properly.
3. If the network has no DNS server configured, check the hosts file of the machine where the client is located to see whether the FQDN is configured with the correct IP address.
4. If the network configuration is OK, the machine where the client is located must be able to ping the connected FQDN; otherwise the client cannot connect to the server.
## 7. Although the syntax is correct, why do I still get the "Invalid SQL" error?
If you have confirmed that the syntax is correct, then for versions older than 2.0, please check whether the SQL statement length exceeds 64K; if it does, this error is also returned.
## 8. Are “validation queries” supported?
TDengine does not yet have a dedicated set of validation queries. However, it is recommended to use the database "log" monitored by the system.
## 9. Can I delete or update a record?
TDengine does not support the deletion function at present, and may support it in the future according to user requirements.
Starting from 2.0.8.0, TDengine supports updating written data. Using the update function requires the UPDATE 1 parameter when creating the database; you can then use the INSERT INTO command to update written data with the same timestamp. The UPDATE parameter cannot be modified with the ALTER DATABASE command. In a database created without UPDATE 1, writing data with an existing timestamp will not modify the earlier data, and no error is reported.
It should also be noted that when UPDATE is 0, data with a duplicate timestamp sent later is silently discarded, yet still counted in the affected rows (so the return value of an INSERT instruction cannot be used to check for duplicate timestamps). The main reason for this design is that TDengine treats written data as a stream: regardless of timestamp conflicts, TDengine assumes the originating device really generated such data. The UPDATE parameter only controls how such stream data is persisted: when UPDATE is 0, data written first overwrites data written later; when UPDATE is 1, data written later overwrites data written first. Which behavior to choose depends on whether the earlier or the later data is expected in subsequent use and statistics.
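A minimal sketch (database and table names are hypothetical): with UPDATE 1, the second INSERT below changes val at the duplicate timestamp to 2; in a database created without UPDATE 1, it would be silently ignored while still being counted in the affected rows.
```mysql
CREATE DATABASE power UPDATE 1;
USE power;
CREATE TABLE t1 (ts TIMESTAMP, val INT);
INSERT INTO t1 VALUES ('2021-07-01 08:00:00.000', 1);
INSERT INTO t1 VALUES ('2021-07-01 08:00:00.000', 2);
SELECT * FROM t1;
```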
## 10. How to create a table with more than 1024 columns?
In version 2.0 and above, 1024 columns are supported by default; older versions of TDengine allowed at most 250 columns per table. If you exceed the limit, it is recommended to split the wide table logically into several smaller ones according to the data characteristics.
## 11. What is the most effective way to write data?
Insert in batches: each write statement can insert multiple records into one or multiple tables at the same time, as in the sketch below.
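A sketch reusing the smart meter tables d1001 and d1002 from the query examples above (timestamps and values are illustrative):
```mysql
INSERT INTO d1001 VALUES ('2018-10-03 14:38:05.000', 10.3, 219, 0.31)
                         ('2018-10-03 14:38:15.000', 12.6, 218, 0.33)
            d1002 VALUES ('2018-10-03 14:38:16.600', 10.3, 218, 0.25);
```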
## 12. How to solve the problem that Chinese characters in nchar data inserted on Windows are parsed into garbled characters?
If there are Chinese characters in nchar data on Windows, first confirm that the system region is set to China (this can be set in the Control Panel); the taos client in cmd should then work normally. If you are developing Java applications in an IDE such as Eclipse or IntelliJ, confirm that the file encoding in the IDE is set to GBK (the default encoding used by Java), and then initialize the client configuration when creating the Connection, as follows:
```JAVA
Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.LOCALE_KEY, "UTF-8");
Connection connection = DriverManager.getConnection(url, properties);
```
## 13. JDBC error: the executed SQL is not a DML or a DDL?
Please update to the latest JDBC driver.
```xml
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.27</version>
</dependency>
```
## 14. taos connect failed, reason: invalid timestamp
The common reason is that the server and client clocks are not synchronized. You can synchronize them with a time server (use the ntpdate command on Linux; on Windows, select automatic synchronization in the time settings).
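For example, on Linux (the time server below is a placeholder; any reachable NTP server works):
```bash
sudo ntpdate pool.ntp.org
```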
## 15. Incomplete display of table name
Because the display width of the taos shell in the terminal is limited, a relatively long table name may not be displayed completely, and operating on the truncated name then causes a Table does not exist error. Workarounds: modify the maxBinaryDisplayWidth option in the taos.cfg file, directly enter the command `set max_binary_display_width 100`, or add the \\G parameter at the end of the command to adjust how the results are displayed.
## 16. How to migrate data?
TDengine uniquely identifies a machine by its hostname. When moving data files from machine A to machine B, pay attention to the following three points:
- For versions 2.0.0.0 to 2.0.6.x, reconfigure machine B's hostname to match machine A's.
- For 2.0.7.0 and later versions, go to /var/lib/taos/dnode, fix the FQDN corresponding to the dnodeId in dnodeEps.json, and restart. Make sure this file is identical on all machines.
- The storage structures of versions 1.x and 2.x are incompatible; use a migration tool or your own application to export and import the data.
## 17. How to temporarily adjust the log level in the command line program taos?
For convenience in debugging, since version 2.0.16 the command line program taos provides two new log-related instructions:
```mysql
ALTER LOCAL flag_name flag_value;
```
This modifies the log level of a specific module in the current command line program (it is only valid for the current command line program; if taos is restarted, it needs to be set again):
- flag_name can be: debugFlag, cDebugFlag, tmrDebugFlag, uDebugFlag, rpcDebugFlag
- flag_value can be: 131 (output error and warning logs), 135 (output error, warning, and debug logs), 143 (output error, warning, debug, and trace logs)
```mysql
ALTER LOCAL RESETLOG;
```
This wipes all log files generated by the client on the machine.
# Connect with other tools
## Telegraf
TDengine is easy to integrate with [Telegraf](https://www.influxdata.com/time-series-platform/telegraf/), an open-source server agent for collecting and sending metrics and events, without extra development.
### Install Telegraf
At present, TDengine supports Telegraf versions newer than 1.7.4. Users can go to the [download link] and choose the proper package for their system.
### Configure Telegraf
Telegraf is configured by changing items in the configuration file */etc/telegraf/telegraf.conf*.
In the **output plugins** section, add the _[[outputs.http]]_ item:
- _url_: http://ip:6020/telegraf/udb, in which _ip_ is the IP address of any node in the TDengine cluster. Port 6020 is the RESTful API port used by TDengine. _udb_ is the name of the database to save data in, which needs to be created beforehand.
- _method_: "POST"
- _username_: user name to log in to TDengine
- _password_: password to log in to TDengine
- _data_format_: "json"
- _json_timestamp_units_: "1ms"
In the **agent** section:
- hostname: used to distinguish different machines; it must be unique.
- metric_batch_size: 30, the maximum number of records Telegraf writes per batch. The larger the value, the less frequently requests are sent; for TDengine, the value should be less than 50. A sketch of the resulting configuration follows this list.
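A sketch of the corresponding telegraf.conf fragments (the IP address, the database name udb, and the credentials are placeholders):
```toml
[agent]
  hostname = "machine-01"                      # placeholder; must be unique
  metric_batch_size = 30                       # keep below 50 for TDengine

[[outputs.http]]
  url = "http://127.0.0.1:6020/telegraf/udb"   # ip and udb are placeholders; create udb beforehand
  method = "POST"
  username = "root"                            # TDengine login
  password = "taosdata"
  data_format = "json"
  json_timestamp_units = "1ms"
```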
Please refer to the [Telegraf docs](https://docs.influxdata.com/telegraf/v1.11/) for more information.
## Grafana
[Grafana] is an open-source system for time-series data display. It is easy to integrate TDengine and Grafana to build a monitor system. Data saved in TDengine can be fetched and shown on the Grafana dashboard.
### Install Grafana
For now, TDengine supports Grafana versions newer than 5.2.4. Users can go to the [Grafana download page] for the proper package to download.
### Configure Grafana
TDengine Grafana plugin is in the _/usr/local/taos/connector/grafana_ directory.
Taking CentOS 7.2 as an example, just copy the TDengine directory to the _/var/lib/grafana/plugins_ directory and restart Grafana.
### Use Grafana
Users can log in to the Grafana server (username/password: admin/admin) through localhost:3000 to configure TDengine as the data source. As shown in the picture below, TDengine appears as a data source option in the highlighted box:
![img](../assets/clip_image001.png)
When choosing TDengine as the data source, the Host in the HTTP configuration should be the IP address of any node of a TDengine cluster, with the port set to 6020. For example, when TDengine and Grafana are on the same machine, it should be configured as http://localhost:6020.
Besides, users should also set the username and password used to log in to TDengine. Then click the _Save&Test_ button to save.
![img](../assets/clip_image001-2474914.png)
Then, TDengine as a data source should show in the Grafana data source list.
![img](../assets/clip_image001-2474939.png)
Then, users can create Dashboards in Grafana using TDengine as the data source:
![img](../assets/clip_image001-2474961.png)
Click the _Add Query_ button to add a query and input the SQL command you want to run in the _INPUT SQL_ text box. The SQL command should expect a two-column, multi-row result, such as _SELECT count(*) FROM sys.cpu WHERE ts>=from and ts<to interval(interval)_, in which _from_, _to_ and _interval_ are TDengine inner variables representing the query time range and time interval.
The _ALIAS BY_ field sets the query alias. Click _GENERATE SQL_ to send the command to TDengine:
![img](../assets/clip_image001-2474987.png)
Please refer to the [Grafana official document] for more information about Grafana.
## Matlab
Matlab can connect to TDengine and retrieve data through the TDengine JDBC driver.
### MatLab and TDengine JDBC adaptation
Several steps are required to adapt Matlab to TDengine. Taking adapting Matlab2017a on Windows10 as an example:
1. Copy the file _JDBCDriver-1.0.0-dist.jar_ from the TDengine package to the directory _${matlab_root}\MATLAB\R2017a\java\jar\toolbox_
2. Copy the file _taos.lib_ from the TDengine package to _${matlab_root_dir}\MATLAB\R2017a\lib\win64_
3. Add the .jar package just copied to the Matlab classpath. Append the line below to the end of the file _${matlab_root_dir}\MATLAB\R2017a\toolbox\local\classpath.txt_
`$matlabroot/java/jar/toolbox/JDBCDriver-1.0.0-dist.jar`
4. Create a file called _javalibrarypath.txt_ in the directory _${user_home}\AppData\Roaming\MathWorks\MATLAB\R2017a\_, and add the _taos.dll_ path in the file. For example, if the file _taos.dll_ is in the directory _C:\Windows\System32_, then add the following line in *javalibrarypath.txt*:
`C:\Windows\System32`
### TDengine operations in Matlab
After correct configuration, open Matlab:
- Build a connection:
  `conn = database('db', 'root', 'taosdata', 'com.taosdata.jdbc.TSDBDriver', 'jdbc:TSDB://127.0.0.1:0/')`
- Make a query:
  `sql0 = ['select * from tb']`
  `data = select(conn, sql0);`
- Insert a record:
  `sql1 = ['insert into tb values (now, 1)']`
  `exec(conn, sql1)`
Please refer to the file _examples\Matlab\TDengineDemo.m_ for more information.
## R
Users can use the R language to access the TDengine server through the JDBC interface. First, install the JDBC package in R:
```R
install.packages('RJDBC', repos='http://cran.us.r-project.org')
```
Then use the _library_ function to load the package:
```R
library('RJDBC')
```
Then load the TDengine JDBC driver:
```R
drv<-JDBC("com.taosdata.jdbc.TSDBDriver","JDBCDriver-1.0.0-dist.jar", identifier.quote="\"")
```
If succeed, no error message will display. Then use the following command to try a database connection:
```R
conn<-dbConnect(drv,"jdbc:TSDB://192.168.0.1:0/?user=root&password=taosdata","root","taosdata")
```
Please replace the IP address in the command above with the correct one. If no error message is shown, the connection is established successfully. TDengine supports the following functions in the _RJDBC_ package:
- _dbWriteTable(conn, "test", iris, overwrite=FALSE, append=TRUE)_: write the data in the data frame _iris_ to the table _test_ on the TDengine server. The parameter _overwrite_ must be _FALSE_ and _append_ must be _TRUE_, and the schema of the data frame _iris_ must match the table _test_.
- _dbGetQuery(conn, "select count(*) from test")_: run a query command
- _dbSendUpdate(conn, "use db")_: run any non-query command.
- _dbReadTable(conn, "test")_: read all the data in table _test_
- _dbDisconnect(conn)_: close a connection
- _dbRemoveTable(conn, "test")_: remove table _test_
The functions below are **not supported** currently:
- _dbExistsTable(conn, "test")_: check whether table _test_ exists
- _dbListTables(conn)_: list all tables in the connection
[Telegraf]: www.taosdata.com
[download link]: https://portal.influxdata.com/downloads
[Telegraf document]: www.taosdata.com
[Grafana]: https://grafana.com
[Grafana download page]: https://grafana.com/grafana/download
[Grafana official document]: https://grafana.com/docs/
name: tdengine
base: core18
version: '2.1.1.0'
version: '2.1.3.0'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
......@@ -72,7 +72,7 @@ parts:
- usr/bin/taosd
- usr/bin/taos
- usr/bin/taosdemo
- usr/lib/libtaos.so.2.1.1.0
- usr/lib/libtaos.so.2.1.3.0
- usr/lib/libtaos.so.1
- usr/lib/libtaos.so
......
......@@ -51,10 +51,10 @@ JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getTsCharset
/*
* Class: com_taosdata_jdbc_TSDBJNIConnector
* Method: getResultTimePrecision
* Signature: (J)J
* Method: getResultTimePrecisionImp
* Signature: (JJ)I
*/
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TDDBJNIConnector_getResultTimePrecision
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TDDBJNIConnector_getResultTimePrecisionImp
(JNIEnv *, jobject, jlong, jlong);
/*
......