提交 5fa02912 · 作者:Shengliang Guan

Merge from master

# Use the latest 2.1 version of CircleCI pipeline process engine. See: https://circleci.com/docs/2.0/configuration-reference
version: 2.1
# Use a package of configuration called an orb.
orbs:
  # Declare a dependency on the welcome-orb
  welcome: circleci/welcome-orb@0.4.1
# Orchestrate or schedule a set of jobs
workflows:
  # Name the workflow "welcome"
  welcome:
    # Run the welcome/run job in its own container
    jobs:
      - welcome/run
......@@ -2,6 +2,7 @@ build/
.vscode/
.idea/
cmake-build-debug/
cmake-build-release/
cscope.out
.DS_Store
debug/
......
......@@ -10,3 +10,6 @@
[submodule "tests/examples/rust"]
path = tests/examples/rust
url = https://github.com/songtianyi/tdengine-rust-bindings.git
[submodule "deps/jemalloc"]
path = deps/jemalloc
url = https://github.com/jemalloc/jemalloc
......@@ -3,7 +3,7 @@ IF (CMAKE_VERSION VERSION_LESS 3.0)
PROJECT(TDengine CXX)
SET(PROJECT_VERSION_MAJOR "${LIB_MAJOR_VERSION}")
SET(PROJECT_VERSION_MINOR "${LIB_MINOR_VERSION}")
SET(PROJECT_VERSION_PATCH"${LIB_PATCH_VERSION}")
SET(PROJECT_VERSION_PATCH "${LIB_PATCH_VERSION}")
SET(PROJECT_VERSION "${LIB_VERSION_STRING}")
ELSE ()
CMAKE_POLICY(SET CMP0048 NEW)
......@@ -42,6 +42,13 @@ INCLUDE(cmake/env.inc)
INCLUDE(cmake/version.inc)
INCLUDE(cmake/install.inc)
IF (CMAKE_SYSTEM_NAME MATCHES "Linux")
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pipe -Wall -Wshadow -Werror")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pipe -Wall -Wshadow -Werror")
ENDIF ()
MESSAGE(STATUS "CMAKE_C_FLAGS: ${CMAKE_C_FLAGS}")
MESSAGE(STATUS "CMAKE_CXX_FLAGS: ${CMAKE_CXX_FLAGS}")
ADD_SUBDIRECTORY(deps)
ADD_SUBDIRECTORY(src)
ADD_SUBDIRECTORY(tests)
......
......@@ -94,7 +94,7 @@ def pre_test(){
make > /dev/null
make install > /dev/null
cd ${WKC}/tests
pip3 install ${WKC}/src/connector/python/
pip3 install ${WKC}/src/connector/python
'''
return 1
}
......
......@@ -116,7 +116,7 @@ mkdir debug && cd debug
cmake .. && cmake --build .
```
在X86-64、X86、arm64 和 arm32 平台上,TDengine 生成脚本可以自动检测机器架构。也可以手动配置 CPUTYPE 参数来指定 CPU 类型,如 aarch64 或 aarch32 等。
在X86-64、X86、arm64、arm32 和 mips64 平台上,TDengine 生成脚本可以自动检测机器架构。也可以手动配置 CPUTYPE 参数来指定 CPU 类型,如 aarch64 或 aarch32 等。
aarch64:
......@@ -130,6 +130,12 @@ aarch32:
cmake .. -DCPUTYPE=aarch32 && cmake --build .
```
mips64:
```bash
cmake .. -DCPUTYPE=mips64 && cmake --build .
```
### Windows 系统
如果你使用的是 Visual Studio 2013 版本:
......
......@@ -110,7 +110,7 @@ mkdir debug && cd debug
cmake .. && cmake --build .
```
TDengine build script can detect the host machine's architecture on X86-64, X86, arm64 and arm32 platform.
TDengine build script can detect the host machine's architecture on X86-64, X86, arm64, arm32 and mips64 platforms.
You can also specify the CPUTYPE option, such as aarch64 or aarch32, if the detection result is not correct:
aarch64:
......@@ -123,13 +123,18 @@ aarch32:
cmake .. -DCPUTYPE=aarch32 && cmake --build .
```
mips64:
```bash
cmake .. -DCPUTYPE=mips64 && cmake --build .
```
### On Windows platform
If you use Visual Studio 2013, please open a command window by executing "cmd.exe".
Please specify "x86_amd64" for 64 bits Windows or specify "x86" is for 32 bits Windows when you execute vcvarsall.bat.
Please specify "amd64" for 64 bits Windows or specify "x86" is for 32 bits Windows when you execute vcvarsall.bat.
```cmd
mkdir debug && cd debug
"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat" < x86_amd64 | x86 >
"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat" < amd64 | x86 >
cmake .. -G "NMake Makefiles"
nmake
```
......
......@@ -59,6 +59,11 @@ IF (TD_LINUX_64)
MESSAGE(STATUS "linux64 is defined")
SET(COMMON_FLAGS "-Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ADD_DEFINITIONS(-DUSE_LIBICONV)
IF (JEMALLOC_ENABLED)
ADD_DEFINITIONS(-DTD_JEMALLOC_ENABLED -I${CMAKE_BINARY_DIR}/build/include -L${CMAKE_BINARY_DIR}/build/lib -Wl,-rpath,${CMAKE_BINARY_DIR}/build/lib -ljemalloc)
ENDIF ()
ENDIF ()
IF (TD_LINUX_32)
......@@ -165,9 +170,12 @@ IF (TD_WINDOWS)
IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
ENDIF ()
IF (TD_MEMORY_SANITIZER)
MESSAGE("memory sanitizer detected as true")
SET(DEBUG_FLAGS "/fsanitize=address /Zi /W3 /GL")
ELSE ()
MESSAGE("memory sanitizer detected as false")
SET(DEBUG_FLAGS "/Zi /W3 /GL")
ENDIF ()
SET(RELEASE_FLAGS "/W0 /O3 /GL")
......
......@@ -73,6 +73,11 @@ IF (${RANDOM_NETWORK_FAIL} MATCHES "true")
MESSAGE(STATUS "build with random-network-fail enabled")
ENDIF ()
IF (${JEMALLOC_ENABLED} MATCHES "true")
SET(TD_JEMALLOC_ENABLED TRUE)
MESSAGE(STATUS "build with jemalloc enabled")
ENDIF ()
SET(TD_BUILD_JDBC TRUE)
IF (${BUILD_JDBC} MATCHES "false")
......
......@@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "2.0.20.8")
SET(TD_VER_NUMBER "2.1.3.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
......
......@@ -18,3 +18,16 @@ ENDIF ()
IF (TD_DARWIN AND TD_MQTT)
ADD_SUBDIRECTORY(MQTT-C)
ENDIF ()
IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
MESSAGE("setup dpes/jemalloc, current source dir:" ${CMAKE_CURRENT_SOURCE_DIR})
MESSAGE("binary dir:" ${CMAKE_BINARY_DIR})
include(ExternalProject)
ExternalProject_Add(jemalloc
PREFIX "jemalloc"
SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jemalloc
BUILD_IN_SOURCE 1
CONFIGURE_COMMAND ./autogen.sh COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/
BUILD_COMMAND ${MAKE}
)
ENDIF ()
Subproject commit ea6b3e973b477b8061e0076bb257dbd7f3faa756
......@@ -36,6 +36,15 @@ static char monotonic_info_string[32];
static long mono_ticksPerMicrosecond = 0;
#ifdef _TD_NINGSI_60
// implement __rdtsc in ningsi60
uint64_t __rdtsc(){
unsigned int lo,hi;
__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
return ((uint64_t)hi << 32) | lo;
}
#endif
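// 基于 x86 的 TSC(rdtsc)计数器,将时钟周期数换算为微秒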
static monotime getMonotonicUs_x86() {
return __rdtsc() / mono_ticksPerMicrosecond;
}
......
......@@ -15,6 +15,7 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
* [命令行程序TAOS](/getting-started#console):访问TDengine的简便方式
* [极速体验](/getting-started#demo):运行示例程序,快速体验高效的数据插入、查询
* [支持平台列表](/getting-started#platforms):TDengine服务器和客户端支持的平台列表
* [Kubernetes部署](https://taosdata.github.io/TDengine-Operator/zh/index.html):TDengine在Kubernetes环境进行部署的详细说明
## [整体架构](/architecture)
......@@ -80,7 +81,7 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
## [与其他工具的连接](/connections)
* [Grafana](/connections#grafana):获取并可视化保存在TDengine的数据
* [Matlab](/connections#matlab):通过配置Matlab的JDBC数据源访问保存在TDengine的数据
* [MATLAB](/connections#matlab):通过配置MATLAB的JDBC数据源访问保存在TDengine的数据
* [R](/connections#r):通过配置R的JDBC数据源访问保存在TDengine的数据
* [IDEA Database](https://www.taosdata.com/blog/2020/08/27/1767.html):通过IDEA 数据库管理工具可视化使用 TDengine
......@@ -117,9 +118,9 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
## 常用工具
* [TDengine样例导入工具](https://www.taosdata.com/blog/2020/01/18/1166.html)
* [TDengine性能对比测试工具](https://www.taosdata.com/blog/2020/01/18/1166.html)
* [TDengine写入性能测试工具](https://www.taosdata.com/blog/2020/01/18/1166.html)
* [IDEA数据库管理工具可视化使用TDengine](https://www.taosdata.com/blog/2020/08/27/1767.html)
* [基于eletron开发的跨平台TDengine图形化管理工具](https://github.com/skye0207/TDengineGUI)
* [基于Electron开发的跨平台TDengine图形化管理工具](https://github.com/skye0207/TDengineGUI)
* [DataX,支持TDengine的离线数据采集/同步工具](https://github.com/wgzhao/DataX)(文档:[读取插件](https://github.com/wgzhao/DataX/blob/master/docs/src/main/sphinx/reader/tdenginereader.md)、[写入插件](https://github.com/wgzhao/DataX/blob/master/docs/src/main/sphinx/writer/tdenginewriter.md))
## TDengine与其他数据库的对比测试
......
......@@ -9,8 +9,8 @@ TDengine的模块之一是时序数据库。但除此之外,为减少研发的
* __10倍以上的性能提升__:定义了创新的数据存储结构,单核每秒能处理至少2万次请求,插入数百万个数据点,读出一千万以上数据点,比现有通用数据库快十倍以上。
* __硬件或云服务成本降至1/5__:由于超强性能,计算资源不到通用大数据方案的1/5;通过列式存储和先进的压缩算法,存储空间不到通用数据库的1/10。
* __全栈时序数据处理引擎__:将数据库、消息队列、缓存、流式计算等功能融为一体,应用无需再集成Kafka/Redis/HBase/Spark/HDFS等软件,大幅降低应用开发和维护的复杂度成本。
* __强大的分析功能__:无论是十年前还是一秒钟前的数据,指定时间范围即可查询。数据可在时间轴上或多个设备上进行聚合。即席查询可通过Shell, Python, R, Matlab随时进行。
* __与第三方工具无缝连接__:不用一行代码,即可与Telegraf, Grafana, EMQ, HiveMQ, Prometheus, Matlab, R等集成。后续将支持OPC, Hadoop, Spark等, BI工具也将无缝连接。
* __强大的分析功能__:无论是十年前还是一秒钟前的数据,指定时间范围即可查询。数据可在时间轴上或多个设备上进行聚合。即席查询可通过Shell, Python, R, MATLAB随时进行。
* __与第三方工具无缝连接__:不用一行代码,即可与Telegraf, Grafana, EMQ, HiveMQ, Prometheus, MATLAB, R等集成。后续将支持OPC, Hadoop, Spark等, BI工具也将无缝连接。
* __零运维成本、零学习成本__:安装集群简单快捷,无需分库分表,实时备份。类似标准SQL,支持RESTful, 支持Python/Java/C/C++/C#/Go/Node.js, 与MySQL相似,零学习成本。
采用TDengine,可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。但需要指出的是,因充分利用了物联网时序数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM等通用型数据。
......
# 通过 Docker 快速体验 TDengine
虽然并不推荐在生产环境中通过 Docker 来部署 TDengine 服务,但 Docker 工具能够很好地屏蔽底层操作系统的环境差异,很适合在开发测试或初次体验时用于安装运行 TDengine 的工具集。特别是,借助 Docker,能够比较方便地在 Mac OSX 和 Windows 系统上尝试 TDengine,而无需安装虚拟机或额外租用 Linux 服务器。
下文通过 Step by Step 风格的介绍,讲解如何通过 Docker 快速建立 TDengine 的单节点运行环境,以支持开发和测试。
## 下载 Docker
Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)
安装完毕后可以在命令行终端查看 Docker 版本。如果版本号正常输出,则说明 Docker 环境已经安装成功。
```bash
$ docker -v
Docker version 20.10.5, build 55c4c88
```
## 在 Docker 容器中运行 TDengine
1,使用命令拉取 TDengine 镜像,并使它在后台运行。
```bash
$ docker run -d tdengine/tdengine
cdf548465318c6fc2ad97813f89cc60006393392401cae58a27b15ca9171f316
```
- **docker run**:通过 Docker 运行一个容器。
- **-d**:让容器在后台运行。
- **tdengine/tdengine**:拉取的 TDengine 官方发布的应用镜像。
- **cdf548465318c6fc2ad97813f89cc60006393392401cae58a27b15ca9171f316**:这个返回的长字符是容器 ID,我们可以通过容器 ID 来查看对应的容器。
2,确认容器是否已经正确运行。
```bash
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS ···
cdf548465318 tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ···
```
- **docker ps**:列出所有正在运行状态的容器信息。
- **CONTAINER ID**:容器 ID。
- **IMAGE**:使用的镜像。
- **COMMAND**:启动容器时运行的命令。
- **CREATED**:容器创建时间。
- **STATUS**:容器状态。UP 表示运行中。
3,进入 Docker 容器内,使用 TDengine。
```bash
$ docker exec -it cdf548465318 /bin/bash
root@cdf548465318:~/TDengine-server-2.0.13.0#
```
- **docker exec**:通过 docker exec 命令进入容器,如果退出,容器不会停止。
- **-i**:进入交互模式。
- **-t**:指定一个终端。
- **cdf548465318**:容器 ID,需要根据 docker ps 指令返回的值进行修改。
- **/bin/bash**:载入容器后运行 bash 来进行交互。
4,进入容器后,执行 taos shell 客户端程序。
```bash
root@cdf548465318:~/TDengine-server-2.0.13.0# taos
Welcome to the TDengine shell from Linux, Client Version:2.0.13.0
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
taos>
```
TDengine 终端成功连接服务端,打印出了欢迎消息和版本信息。如果失败,会有错误信息打印出来。
在 TDengine 终端中,可以通过 SQL 命令来创建/删除数据库、表、超级表等,并可以进行插入和查询操作。具体可以参考 [TAOS SQL 说明文档](https://www.taosdata.com/cn/documentation/taos-sql)
## 通过 taosdemo 进一步了解 TDengine
1,接上面的步骤,先退出 TDengine 终端程序。
```bash
taos> q
root@cdf548465318:~/TDengine-server-2.0.13.0#
```
2,在命令行界面执行 taosdemo。
```bash
root@cdf548465318:~/TDengine-server-2.0.13.0# taosdemo
###################################################################
# Server IP: localhost:0
# User: root
# Password: taosdata
# Use metric: true
# Datatype of Columns: int int int int int int int float
# Binary Length(If applicable): -1
# Number of Columns per record: 3
# Number of Threads: 10
# Number of Tables: 10000
# Number of Data per Table: 100000
# Records/Request: 1000
# Database name: test
# Table prefix: t
# Delete method: 0
# Test time: 2021-04-13 02:05:20
###################################################################
```
回车后,该命令将新建一个数据库 test,并且自动创建一张超级表 meters,并以超级表 meters 为模板创建了 1 万张表,表名从 "t0" 到 "t9999"。每张表有 10 万条记录,每条记录有 f1,f2,f3 三个字段,时间戳 ts 字段从 "2017-07-14 10:40:00.000" 到 "2017-07-14 10:41:39.999"。每张表带有 areaid 和 loc 两个标签 TAG,areaid 被设置为 1 到 10,loc 被设置为 "beijing" 或 "shanghai"。
3,进入 TDengine 终端,查看 taosdemo 生成的数据。
- **进入命令行。**
```bash
root@cdf548465318:~/TDengine-server-2.0.13.0# taos
Welcome to the TDengine shell from Linux, Client Version:2.0.13.0
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
taos>
```
- **查看数据库。**
```bash
taos> show databases;
name | created_time | ntables | vgroups | ···
test | 2021-04-13 02:14:15.950 | 10000 | 6 | ···
log | 2021-04-12 09:36:37.549 | 4 | 1 | ···
```
- **查看超级表。**
```bash
taos> use test;
Database changed.
taos> show stables;
name | created_time | columns | tags | tables |
=====================================================================================
meters | 2021-04-13 02:14:15.955 | 4 | 2 | 10000 |
Query OK, 1 row(s) in set (0.001737s)
```
- **查看表,限制输出十条。**
```bash
taos> select * from test.t0 limit 10;
ts | f1 | f2 | f3 |
====================================================================
2017-07-14 02:40:01.000 | 3 | 9 | 0 |
2017-07-14 02:40:02.000 | 0 | 1 | 2 |
2017-07-14 02:40:03.000 | 7 | 2 | 3 |
2017-07-14 02:40:04.000 | 9 | 4 | 5 |
2017-07-14 02:40:05.000 | 1 | 2 | 5 |
2017-07-14 02:40:06.000 | 6 | 3 | 2 |
2017-07-14 02:40:07.000 | 4 | 7 | 8 |
2017-07-14 02:40:08.000 | 4 | 6 | 6 |
2017-07-14 02:40:09.000 | 5 | 7 | 7 |
2017-07-14 02:40:10.000 | 1 | 5 | 0 |
Query OK, 10 row(s) in set (0.003638s)
```
- **查看 t0 表的标签值。**
```bash
taos> select areaid, loc from test.t0;
areaid | loc |
===========================
10 | shanghai |
Query OK, 1 row(s) in set (0.002904s)
```
## 停止正在 Docker 中运行的 TDengine 服务
```bash
$ docker stop cdf548465318
cdf548465318
```
- **docker stop**:通过 docker stop 停止指定的正在运行中的 docker 镜像。
- **cdf548465318**:容器 ID,根据 docker ps 指令返回的结果进行修改。
## 编程开发时连接在 Docker 中的 TDengine
从 Docker 之外连接使用在 Docker 容器内运行的 TDengine 服务,有以下两个思路:
1,通过端口映射(-p),将容器内部开放的网络端口映射到宿主机的指定端口上。通过挂载本地目录(-v),可以实现宿主机与容器内部的数据同步,防止容器删除后,数据丢失。
```bash
$ docker run -d -v /etc/taos:/etc/taos -p 6041:6041 tdengine/tdengine
526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd
$ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql
{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep1,keep2,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","precision","status"],"data":[],"rows":0}
```
- 第一条命令,启动一个运行了 TDengine 的 docker 容器,并且将容器的 6041 端口映射到宿主机的 6041 端口上。
- 第二条命令,通过 RESTful 接口访问 TDengine,这时连接的是本机的 6041 端口,可见连接成功。
注意:在这个示例中,出于方便性考虑,只映射了 RESTful 需要的 6041 端口。如果希望以非 RESTful 方式连接 TDengine 服务,则需要映射从 6030 开始的共 11 个端口(完整的端口情况请参见 [TDengine 2.0 端口说明](https://www.taosdata.com/cn/documentation/faq#port))。在例子中,挂载本地目录也只是处理了配置文件所在的 /etc/taos 目录,而没有挂载数据存储目录。
2,直接通过 exec 命令,进入到 docker 容器中去做开发。也即,把程序代码放在 TDengine 服务端所在的同一个 Docker 容器中,连接容器本地的 TDengine 服务。
```bash
$ docker exec -it 526aa188da /bin/bash
```
......@@ -10,7 +10,9 @@ TDengine软件分为服务器、客户端和报警模块三部分,目前2.0版
### 通过Docker容器运行
请参考[TDengine官方Docker镜像的发布、下载和使用](https://www.taosdata.com/blog/2020/05/13/1509.html)
暂时不建议生产环境采用 Docker 来部署 TDengine 的客户端或服务端,但在开发环境下或初次尝试时,使用 Docker 方式部署是十分方便的。特别是,利用 Docker,可以方便地在 Mac OSX 和 Windows 环境下尝试 TDengine。
详细操作方法请参照 [通过Docker快速体验TDengine](https://www.taosdata.com/cn/documentation/getting-started/docker)
### <a class="anchor" id="package-install"></a>通过安装包安装
......@@ -22,7 +24,7 @@ TDengine的安装非常简单,从下载到安装成功仅仅只要几秒钟。
## <a class="anchor" id="start"></a>轻松启动
安装成功后,用户可使用`systemctl`命令来启动TDengine的服务进程。
安装成功后,用户可使用 `systemctl` 命令来启动 TDengine 的服务进程。
```bash
$ systemctl start taosd
......@@ -33,21 +35,22 @@ $ systemctl start taosd
$ systemctl status taosd
```
如果TDengine服务正常工作,那么您可以通过TDengine的命令行程序`taos`来访问并体验TDengine。
如果 TDengine 服务正常工作,那么您可以通过 TDengine 的命令行程序 `taos` 来访问并体验 TDengine。
**注意:**
- systemctl命令需要 _root_ 权限来运行,如果您非 _root_ 用户,请在命令前添加 sudo
- 为更好的获得产品反馈,改善产品,TDengine会采集基本的使用信息,但您可以修改系统配置文件taos.cfg里的配置参数telemetryReporting, 将其设为0,就可将其关闭。
- TDengine采用FQDN(一般就是hostname)作为节点的ID,为保证正常运行,需要给运行taosd的服务器配置好hostname,在客户端应用运行的机器配置好DNS服务或hosts文件,保证FQDN能够解析。
- systemctl 命令需要 _root_ 权限来运行,如果您非 _root_ 用户,请在命令前添加 sudo 。
- 为更好的获得产品反馈,改善产品,TDengine 会采集基本的使用信息,但您可以修改系统配置文件 taos.cfg 里的配置参数 telemetryReporting, 将其设为 0,就可将其关闭。
- TDengine 采用 FQDN (一般就是 hostname )作为节点的 ID,为保证正常运行,需要给运行 taosd 的服务器配置好 hostname,在客户端应用运行的机器配置好 DNS 服务或 hosts 文件,保证 FQDN 能够解析。
- `systemctl stop taosd` 指令在执行后并不会马上停止 TDengine 服务,而是会等待系统中必要的落盘工作正常完成。在数据量很大的情况下,这可能会消耗较长时间。
* TDengine 支持在使用[`systemd`](https://en.wikipedia.org/wiki/Systemd)做进程服务管理的linux系统上安装,用`which systemctl`命令来检测系统中是否存在`systemd`包:
* TDengine 支持在使用 [`systemd`](https://en.wikipedia.org/wiki/Systemd) 做进程服务管理的 linux 系统上安装,用 `which systemctl` 命令来检测系统中是否存在 `systemd` 包:
```bash
$ which systemctl
```
如果系统中不支持systemd,也可以用手动运行 /usr/local/taos/bin/taosd 方式启动 TDengine 服务。
如果系统中不支持 systemd,也可以用手动运行 /usr/local/taos/bin/taosd 方式启动 TDengine 服务。
## <a class="anchor" id="console"></a>TDengine命令行程序
......
......@@ -107,9 +107,9 @@ TDengine采取的是Master-Slave模式进行同步,与流行的RAFT一致性
![replica-forward.png](page://images/architecture/replica-forward.png)
1. 应用对写请求做基本的合法性检查,通过,则给请求包打上一个版本号(version, 单调递增)
1. 应用对写请求做基本的合法性检查,通过,则给请求包打上一个版本号(version, 单调递增)
2. 应用将打上版本号的写请求封装一个WAL Head, 写入WAL(Write Ahead Log)
3. 应用调用API syncForwardToPeer,如vnode B是slave状态,sync模块将包含WAL Head的数据包通过Forward消息发送给vnode B,否则就不转发。
3. 应用调用API syncForwardToPeer,如vnode B是slave状态,sync模块将包含WAL Head的数据包通过Forward消息发送给vnode B,否则就不转发。
4. vnode B收到Forward消息后,调用回调函数writeToCache, 交给应用处理
5. vnode B应用在写入成功后,都需要调用syncAckForward通知sync模块已经写入成功。
6. 如果quorum大于1,vnode B需要等待应用的回复确认,收到确认后,vnode B发送Forward Response消息给node A。
......@@ -219,7 +219,7 @@ Arbitrator的程序tarbitrator.c在复制模块的同一目录, 编译整个系
不同之处:
- 选举流程不一样:Raft里任何一个节点是candidate时,主动向其他节点发出vote request, 如果超过半数回答Yes, 这个candidate就成为Leader,开始一个新的term. 而TDengine的实现里,节点上线、离线或角色改变都会触发状态消息在节点组类传播,等节点组里状态稳定一致之后才触发选举流程,因为状态稳定一致,基于同样的状态信息,每个节点做出的决定会是一致的,一旦某个节点符合成为master的条件,无需其他节点认可,它会自动将自己设为master。TDengine里,任何一个节点检测到其他节点或自己的角色发生改变,就会给节点组内其他节点进行广播的。Raft里不存在这样的机制,因此需要投票来解决。
- 选举流程不一样:Raft里任何一个节点是candidate时,主动向其他节点发出vote request,如果超过半数回答Yes,这个candidate就成为Leader,开始一个新的term。而TDengine的实现里,节点上线、离线或角色改变都会触发状态消息在节点组内传播,等节点组里状态稳定一致之后才触发选举流程,因为状态稳定一致,基于同样的状态信息,每个节点做出的决定会是一致的,一旦某个节点符合成为master的条件,无需其他节点认可,它会自动将自己设为master。TDengine里,任何一个节点检测到其他节点或自己的角色发生改变,就会向节点组内其他节点进行广播。Raft里不存在这样的机制,因此需要投票来解决。
- 对WAL的一条记录,Raft用term + index来做唯一标识。但TDengine只用version(类似index),在TDengine实现里,仅仅用version是完全可行的, 因为TDengine的选举机制,没有term的概念。
如果整个虚拟节点组全部宕机,重启,但不是所有虚拟节点都上线,这个时候TDengine是不会选出master的,因为未上线的节点有可能有最高version的数据。而RAFT协议,只要超过半数上线,就会选出Leader。
......
......@@ -176,9 +176,9 @@ TDengine 分布式架构的逻辑结构图如下:
**通讯方式:**TDengine系统的各个数据节点之间,以及应用驱动与各数据节点之间的通讯是通过TCP/UDP进行的。因为考虑到物联网场景,数据写入的包一般不大,因此TDengine 除采用TCP做传输之外,还采用UDP方式,因为UDP 更加高效,而且不受连接数的限制。TDengine实现了自己的超时、重传、确认等机制,以确保UDP的可靠传输。对于数据量不到15K的数据包,采取UDP的方式进行传输,超过15K的,或者是查询类的操作,自动采取TCP的方式进行传输。同时,TDengine根据配置和数据包,会自动对数据进行压缩/解压缩,数字签名/认证等处理。对于数据节点之间的数据复制,只采用TCP方式进行数据传输。
**FQDN配置**:一个数据节点有一个或多个FQDN,可以在系统配置文件taos.cfg通过参数“fqdn"进行指定,如果没有指定,系统将自动获取计算机的hostname作为其FQDN。如果节点没有配置FQDN,可以直接将该节点的配置参数fqdn设置为它的IP地址。但不建议使用IP,因为IP地址可变,一旦变化,将让集群无法正常工作。一个数据节点的EP(End Point)由FQDN + Port组成。采用FQDN,需要保证DNS服务正常工作,或者在节点以及应用所在的节点配置好hosts文件。
**FQDN配置**:一个数据节点有一个或多个FQDN,可以在系统配置文件taos.cfg通过参数“fqdn"进行指定,如果没有指定,系统将自动获取计算机的hostname作为其FQDN。如果节点没有配置FQDN,可以直接将该节点的配置参数fqdn设置为它的IP地址。但不建议使用IP,因为IP地址可变,一旦变化,将让集群无法正常工作。一个数据节点的EP(End Point)由FQDN + Port组成。采用FQDN,需要保证DNS服务正常工作,或者在节点以及应用所在的节点配置好hosts文件。另外,这个参数值的长度需要控制在 96 个字符以内。
**端口配置:**一个数据节点对外的端口由TDengine的系统配置参数serverPort决定,对集群内部通讯的端口是serverPort+5。集群内数据节点之间的数据复制操作还占有一个TCP端口,是serverPort+10. 为支持多线程高效的处理UDP数据,每个对内和对外的UDP连接,都需要占用5个连续的端口。因此一个数据节点总的端口范围为serverPort到serverPort + 10,总共11个TCP/UDP端口。(另外还可能有 RESTful、Arbitrator 所使用的端口,那样的话就一共是 13 个。)使用时,需要确保防火墙将这些端口打开,以备使用。每个数据节点可以配置不同的serverPort。
**端口配置:**一个数据节点对外的端口由TDengine的系统配置参数serverPort决定,对集群内部通讯的端口是serverPort+5。集群内数据节点之间的数据复制操作还占有一个TCP端口,是serverPort+10。为支持多线程高效的处理UDP数据,每个对内和对外的UDP连接,都需要占用5个连续的端口。因此一个数据节点总的端口范围为serverPort到serverPort + 10,总共11个TCP/UDP端口。(另外还可能有 RESTful、Arbitrator 所使用的端口,那样的话就一共是 13 个。)使用时,需要确保防火墙将这些端口打开,以备使用。每个数据节点可以配置不同的serverPort。(详细的端口情况请参见 [TDengine 2.0 端口说明](https://www.taosdata.com/cn/documentation/faq#port))
**集群对外连接:** TDengine集群可以容纳单个、多个甚至几千个数据节点。应用只需要向集群中任何一个数据节点发起连接即可,连接需要提供的网络参数是一数据节点的End Point(FQDN加配置的端口号)。通过命令行CLI启动应用taos时,可以通过选项-h来指定数据节点的FQDN, -P来指定其配置的端口号,如果端口不配置,将采用TDengine的系统配置参数serverPort。
......@@ -343,7 +343,7 @@ TDengine采用数据驱动的方式让缓存中的数据写入硬盘进行持久
对于采集的数据,一般有保留时长,这个时长由系统配置参数keep决定。超过这个设置天数的数据文件,将被系统自动删除,释放存储空间。
给定days与keep两个参数,一个vnode总的数据文件数为:keep/days。总的数据文件个数不宜过大,也不宜过小。10到100以内合适。基于这个原则,可以设置合理的days。 目前的版本,参数keep可以修改,但对于参数days,一但设置后,不可修改。
给定days与keep两个参数,一个典型工作状态的vnode中总的数据文件数为:`向上取整(keep/days)+1`。例如 keep=3650、days=10 时,数据文件数约为 366 个。总的数据文件个数不宜过大,也不宜过小。10到100以内合适。基于这个原则,可以设置合理的days。目前的版本,参数keep可以修改,但对于参数days,一旦设置后,不可修改。
在每个数据文件里,一张表的数据是一块一块存储的。一张表可以有一到多个数据文件块。在一个文件块里,数据是列式存储的,占用的是一片连续的存储空间,这样大大提高读取速度。文件块的大小由系统参数maxRows(每块最大记录条数)决定,缺省值为4096。这个值不宜过大,也不宜过小。过大,定位具体时间段的数据的搜索时间会变长,影响读取速度;过小,数据块的索引太大,压缩效率偏低,也影响读取速度。
......
......@@ -12,7 +12,7 @@ TDengine 采用 SQL 作为查询语言。应用程序可以通过 C/C++, Java, G
- 时间戳对齐的连接查询(Join Query: 隐式连接)操作
- 多种聚合/计算函数: count, max, min, avg, sum, twa, stddev, leastsquares, top, bottom, first, last, percentile, apercentile, last_row, spread, diff等
例如:在TAOS Shell中,从表d1001中查询出vlotage > 215的记录,按时间降序排列,仅仅输出2条。
例如:在TAOS Shell中,从表d1001中查询出voltage > 215的记录,按时间降序排列,仅仅输出2条。
```mysql
taos> select * from d1001 where voltage > 215 order by ts desc limit 2;
ts | current | voltage | phase |
......
......@@ -120,7 +120,7 @@ if (async) {
}
```
TDengine中的订阅既可以是同步的,也可以是异步的,上面的代码会根据从命令行获取的参数`async`的值来决定使用哪种方式。这里,同步的意思是用户程序要直接调用`taos_consume`来拉取数据,而异步则由API在内部的另一个线程中调用`taos_consume`,然后把拉取到的数据交给回调函数`subscribe_callback`去处理。
TDengine中的订阅既可以是同步的,也可以是异步的,上面的代码会根据从命令行获取的参数`async`的值来决定使用哪种方式。这里,同步的意思是用户程序要直接调用`taos_consume`来拉取数据,而异步则由API在内部的另一个线程中调用`taos_consume`,然后把拉取到的数据交给回调函数`subscribe_callback`去处理。(注意,`subscribe_callback` 中不宜做较为耗时的操作,否则有可能导致客户端阻塞等不可控的问题。)
参数`taos`是一个已经建立好的数据库连接,在同步模式下无特殊要求。但在异步模式下,需要注意它不会被其它线程使用,否则可能导致不可预计的错误,因为回调函数在API的内部线程中被调用,而TDengine的部分API不是线程安全的。
......
......@@ -16,7 +16,6 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致
* TDengine 目前不支持针对单条数据记录的删除操作。
* 目前不支持事务操作。
* 目前不支持表间的 union 操作。
* 目前不支持嵌套查询(nested query)。
* 对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet 还没关闭的情况下执行了新的查询,taos-jdbcdriver 会自动关闭上一个 ResultSet。
......@@ -50,6 +49,7 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致
</tr>
</table>
注意:与 JNI 方式不同,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,RESTful 下所有对表名、超级表名的引用都需要指定数据库名前缀。
## 如何获取 taos-jdbcdriver
......@@ -267,7 +267,9 @@ while(resultSet.next()){
> 查询和操作关系型数据库一致,使用下标获取返回字段内容时从 1 开始,建议使用字段名称获取。
### 处理异常
在报错后,通过SQLException可以获取到错误的信息和错误码:
```java
try (Statement statement = connection.createStatement()) {
// executeQuery
......@@ -280,11 +282,90 @@ try (Statement statement = connection.createStatement()) {
e.printStackTrace();
}
```
JDBC连接器可能报错的错误码包括3种:JDBC driver本身的报错(错误码在0x2301到0x2350之间),JNI方法的报错(错误码在0x2351到0x2400之间),TDengine其他功能模块的报错。
具体的错误码请参考:
* https://github.com/taosdata/TDengine/blob/develop/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java
* https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h
### <a class="anchor" id="stmt-java"></a>通过参数绑定写入数据
从 2.1.2.0 版本开始,TDengine 的 **JDBC-JNI** 实现大幅改进了参数绑定方式对数据写入(INSERT)场景的支持。采用这种方式写入数据时,能避免 SQL 语法解析的资源消耗,从而在很多情况下显著提升写入性能。(注意:**JDBC-RESTful** 实现并不提供参数绑定这种使用方式。)
```java
Statement stmt = conn.createStatement();
Random r = new Random();
// INSERT 语句中,VALUES 部分允许指定具体的数据列;如果采取自动建表,则 TAGS 部分需要设定全部 TAGS 列的参数值:
TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags (?, ?) (ts, c1, c2) values(?, ?, ?)");
// 设定数据表名:
s.setTableName("w1");
// 设定 TAGS 取值:
s.setTagInt(0, r.nextInt(10));
s.setTagString(1, "Beijing");
int numOfRows = 10;
// VALUES 部分以逐列的方式进行设置:
ArrayList<Long> ts = new ArrayList<>();
for (int i = 0; i < numOfRows; i++){
ts.add(System.currentTimeMillis() + i);
}
s.setTimestamp(0, ts);
ArrayList<Integer> s1 = new ArrayList<>();
for (int i = 0; i < numOfRows; i++){
s1.add(r.nextInt(100));
}
s.setInt(1, s1);
ArrayList<String> s2 = new ArrayList<>();
for (int i = 0; i < numOfRows; i++){
s2.add("test" + r.nextInt(100));
}
s.setString(2, s2, 10);
// AddBatch 之后,缓存并未清空。为避免混乱,并不推荐在 ExecuteBatch 之前再次绑定新一批的数据:
s.columnDataAddBatch();
// 执行绑定数据后的语句:
s.columnDataExecuteBatch();
// 执行语句后清空缓存。在清空之后,可以复用当前的对象,绑定新的一批数据(可以是新表名、新 TAGS 值、新 VALUES 值):
s.columnDataClearBatch();
// 执行完毕,释放资源:
s.columnDataCloseBatch();
```
用于设定 TAGS 取值的方法总共有:
```java
public void setTagNull(int index, int type)
public void setTagBoolean(int index, boolean value)
public void setTagInt(int index, int value)
public void setTagByte(int index, byte value)
public void setTagShort(int index, short value)
public void setTagLong(int index, long value)
public void setTagTimestamp(int index, long value)
public void setTagFloat(int index, float value)
public void setTagDouble(int index, double value)
public void setTagString(int index, String value)
public void setTagNString(int index, String value)
```
用于设定 VALUES 数据列的取值的方法总共有:
```java
public void setInt(int columnIndex, ArrayList<Integer> list) throws SQLException
public void setFloat(int columnIndex, ArrayList<Float> list) throws SQLException
public void setTimestamp(int columnIndex, ArrayList<Long> list) throws SQLException
public void setLong(int columnIndex, ArrayList<Long> list) throws SQLException
public void setDouble(int columnIndex, ArrayList<Double> list) throws SQLException
public void setBoolean(int columnIndex, ArrayList<Boolean> list) throws SQLException
public void setByte(int columnIndex, ArrayList<Byte> list) throws SQLException
public void setShort(int columnIndex, ArrayList<Short> list) throws SQLException
public void setString(int columnIndex, ArrayList<String> list, int size) throws SQLException
public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
```
其中 setString 和 setNString 都要求用户在 size 参数里声明表定义中对应列的列宽。
### <a class="anchor" id="subscribe"></a>订阅
#### 创建
......@@ -447,7 +528,7 @@ Query OK, 1 row(s) in set (0.000141s)
## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
## <a class="anchor" id="version"></a>TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 |
| -------------------- | ----------------- | -------- |
......
......@@ -32,7 +32,7 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、
**Linux**
**1. 从涛思官网(https://www.taosdata.com/cn/all-downloads/)下载**
**1. 从[涛思官网](https://www.taosdata.com/cn/all-downloads/)下载**
* X64硬件环境:TDengine-client-2.x.x.x-Linux-x64.tar.gz
......@@ -56,7 +56,7 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、
*taos.tar.gz*:应用驱动安装包
*driver*:TDengine应用驱动driver
*connector*: 各种编程语言连接器(go/grafanaplugin/nodejs/python/JDBC)
*examples*: 各种编程语言的示例程序(c/C#/go/JDBC/matlab/python/R)
*examples*: 各种编程语言的示例程序(c/C#/go/JDBC/MATLAB/python/R)
运行install_client.sh进行安装
......@@ -68,7 +68,7 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、
**Windows x64/x86**
**1. 从涛思官网(https://www.taosdata.com/cn/all-downloads/)下载 :**
**1. 从[涛思官网](https://www.taosdata.com/cn/all-downloads/)下载 :**
* X64硬件环境:TDengine-client-2.X.X.X-Windows-x64.exe
......@@ -213,7 +213,7 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine
- `int taos_result_precision(TAOS_RES *res)`
返回结果集时间戳字段的精度,`0` 代表毫秒,`1` 代表微秒`2` 代表纳秒
返回结果集时间戳字段的精度,`0` 代表毫秒,`1` 代表微秒。
- `TAOS_ROW taos_fetch_row(TAOS_RES *res)`
......@@ -291,9 +291,27 @@ typedef struct taosField {
TDengine的异步API均采用非阻塞调用模式。应用程序可以用多线程同时打开多张表,并可以同时对每张打开的表进行查询或者插入操作。需要指出的是,**客户端应用必须确保对同一张表的操作完全串行化**,即对同一个表的插入或查询操作未完成时(未返回时),不能够执行第二个插入或查询操作。
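下面是一段最小示例草图,演示异步查询的典型用法(仅作示意:假设连接 `taos` 已建立、表 `meters` 已存在,回调函数的具体签名请以 taos.h 头文件为准):
```c
#include <stdio.h>
#include "taos.h"

// 取数回调:num_rows 大于 0 时继续异步取下一批,否则释放结果集
static void fetch_cb(void *param, TAOS_RES *res, int num_rows) {
  if (num_rows > 0) {
    // 此处可遍历本批数据(taos_fetch_row 等),处理完后继续取数
    taos_fetch_rows_a(res, fetch_cb, param);
  } else {
    taos_free_result(res);
  }
}

// 查询回调:code 为 0 表示查询提交成功
static void query_cb(void *param, TAOS_RES *res, int code) {
  if (code == 0 && res != NULL) {
    taos_fetch_rows_a(res, fetch_cb, param);
  }
}

void demo_async_query(TAOS *taos) {
  // 发起异步查询后立即返回,结果在回调中处理;
  // 注意:对同一张表的插入或查询仍需由应用自行保证串行化
  taos_query_a(taos, "select * from meters limit 100", query_cb, NULL);
}
```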
### 参数绑定API
<a class="anchor" id="stmt"></a>
### 参数绑定 API
除了直接调用 `taos_query` 进行查询,TDengine也提供了支持参数绑定的Prepare API,与 MySQL 一样,这些API目前也仅支持用问号`?`来代表待绑定的参数,具体如下:
除了直接调用 `taos_query` 进行查询,TDengine 也提供了支持参数绑定的 Prepare API,与 MySQL 一样,这些 API 目前也仅支持用问号 `?` 来代表待绑定的参数。
从 2.1.1.0 和 2.1.2.0 版本开始,TDengine 大幅改进了参数绑定接口对数据写入(INSERT)场景的支持。这样在通过参数绑定接口写入数据时,就避免了 SQL 语法解析的资源消耗,从而在绝大多数情况下显著提升写入性能。此时的典型操作步骤如下:
1. 调用 `taos_stmt_init` 创建参数绑定对象;
2. 调用 `taos_stmt_prepare` 解析 INSERT 语句;
3. 如果 INSERT 语句中预留了表名但没有预留 TAGS,那么调用 `taos_stmt_set_tbname` 来设置表名;
4. 如果 INSERT 语句中既预留了表名又预留了 TAGS(例如 INSERT 语句采取的是自动建表的方式),那么调用 `taos_stmt_set_tbname_tags` 来设置表名和 TAGS 的值;
5. 调用 `taos_stmt_bind_param_batch` 以多列的方式设置 VALUES 的值,或者调用 `taos_stmt_bind_param` 以单行的方式设置 VALUES 的值;
6. 调用 `taos_stmt_add_batch` 把当前绑定的参数加入批处理;
7. 可以重复第 3~6 步,为批处理加入更多的数据行;
8. 调用 `taos_stmt_execute` 执行已经准备好的批处理指令;
9. 执行完毕,调用 `taos_stmt_close` 释放所有资源。
说明:如果 `taos_stmt_execute` 执行成功,假如不需要改变 SQL 语句的话,那么是可以复用 `taos_stmt_prepare` 的解析结果,直接进行第 3~6 步绑定新数据的。但如果执行出错,那么并不建议继续在当前的环境上下文下继续工作,而是建议释放资源,然后从 `taos_stmt_init` 步骤重新开始。
除 C/C++ 语言外,TDengine 的 Java 语言 JNI Connector 也提供参数绑定接口支持,具体请另外参见:[参数绑定接口的 Java 用法](https://www.taosdata.com/cn/documentation/connector/java#stmt-java)
接口相关的具体函数如下(也可以参考 [apitest.c](https://github.com/taosdata/TDengine/blob/develop/tests/examples/c/apitest.c) 文件中使用对应函数的方式):
- `TAOS_STMT* taos_stmt_init(TAOS *taos)`
......@@ -301,11 +319,12 @@ TDengine的异步API均采用非阻塞调用模式。应用程序可以用多线
- `int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length)`
解析一条sql语句,将解析结果和参数信息绑定到stmt上,如果参数length大于0,将使用此参数作为sql语句的长度,如等于0,将自动判断sql语句的长度。
解析一条 SQL 语句,将解析结果和参数信息绑定到 stmt 上,如果参数 length 大于 0,将使用此参数作为 SQL 语句的长度,如等于 0,将自动判断 SQL 语句的长度。
- `int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind)`
进行参数绑定,bind指向一个数组,需保证此数组的元素数量和顺序与sql语句中的参数完全一致。TAOS_BIND 的使用方法与 MySQL中的 MYSQL_BIND 一致,具体定义如下:
不如 `taos_stmt_bind_param_batch` 效率高,但可以支持非 INSERT 类型的 SQL 语句。
进行参数绑定,bind 指向一个数组(代表所要绑定的一行数据),需保证此数组中的元素数量和顺序与 SQL 语句中的参数完全一致。TAOS_BIND 的使用方法与 MySQL 中的 MYSQL_BIND 一致,具体定义如下:
```c
typedef struct TAOS_BIND {
......@@ -319,9 +338,35 @@ typedef struct TAOS_BIND {
} TAOS_BIND;
```
- `int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name)`
(2.1.1.0 版本新增,仅支持用于替换 INSERT 语句中的参数值)
当 SQL 语句中的表名使用了 `?` 占位时,可以使用此函数绑定一个具体的表名。
- `int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags)`
(2.1.2.0 版本新增,仅支持用于替换 INSERT 语句中的参数值)
当 SQL 语句中的表名和 TAGS 都使用了 `?` 占位时,可以使用此函数绑定具体的表名和具体的 TAGS 取值。最典型的使用场景是使用了自动建表功能的 INSERT 语句(目前版本不支持指定具体的 TAGS 列)。tags 参数中的列数量需要与 SQL 语句中要求的 TAGS 数量完全一致。
- `int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind)`
(2.1.1.0 版本新增,仅支持用于替换 INSERT 语句中的参数值)
以多列的方式传递待绑定的数据,需要保证这里传递的数据列的顺序、列的数量与 SQL 语句中的 VALUES 参数完全一致。TAOS_MULTI_BIND 的具体定义如下:
```c
typedef struct TAOS_MULTI_BIND {
int buffer_type;
void * buffer;
uintptr_t buffer_length;
int32_t * length;
char * is_null;
int num; // 列的个数,即 buffer 中的参数个数
} TAOS_MULTI_BIND;
```
- `int taos_stmt_add_batch(TAOS_STMT *stmt)`
将当前绑定的参数加入批处理中,调用此函数后,可以再次调用`taos_stmt_bind_param`绑定新的参数。需要注意,此函数仅支持 insert/import 语句,如果是select等其他SQL语句,将返回错误。
将当前绑定的参数加入批处理中,调用此函数后,可以再次调用 `taos_stmt_bind_param``taos_stmt_bind_param_batch` 绑定新的参数。需要注意,此函数仅支持 INSERT/IMPORT 语句,如果是 SELECT 等其他 SQL 语句,将返回错误。
- `int taos_stmt_execute(TAOS_STMT *stmt)`
......@@ -329,12 +374,17 @@ typedef struct TAOS_BIND {
- `TAOS_RES* taos_stmt_use_result(TAOS_STMT *stmt)`
获取语句的结果集。结果集的使用方式与非参数化调用时一致,使用完成后,应对此结果集调用 `taos_free_result`以释放资源。
获取语句的结果集。结果集的使用方式与非参数化调用时一致,使用完成后,应对此结果集调用 `taos_free_result` 以释放资源。
- `int taos_stmt_close(TAOS_STMT *stmt)`
执行完毕,释放所有资源。
- `char * taos_stmt_errstr(TAOS_STMT *stmt)`
(2.1.3.0 版本新增)
用于在其他 stmt API 返回错误(返回错误码或空指针)时获取错误信息。
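结合上述步骤,下面给出一段最小示例草图(仅作示意:假设连接 `taos` 已建立,且已创建代码注释中所示的超级表;TAOS_BIND/TAOS_MULTI_BIND 的字段与各常量名请以 taos.h 头文件为准):
```c
#include <stdio.h>
#include <string.h>
#include "taos.h"

// 演示用表结构(假设已提前创建):
// create table weather (ts timestamp, temperature int) tags (groupId int);
void demo_stmt_insert(TAOS *taos) {
  // 1. 创建参数绑定对象
  TAOS_STMT *stmt = taos_stmt_init(taos);

  // 2. 解析 INSERT 语句(表名与 TAGS 均用 ? 占位,即自动建表方式)
  const char *sql = "insert into ? using weather tags (?) values (?, ?)";
  taos_stmt_prepare(stmt, sql, 0);  // length 传 0 时自动判断 SQL 长度

  // 4. 表名和 TAGS 都使用了占位符,调用 taos_stmt_set_tbname_tags
  int32_t gid = 1;
  TAOS_BIND tag;
  memset(&tag, 0, sizeof(tag));
  tag.buffer_type = TSDB_DATA_TYPE_INT;
  tag.buffer = &gid;
  tag.buffer_length = sizeof(gid);
  taos_stmt_set_tbname_tags(stmt, "d1001", &tag);

  // 5. 以多列方式一次绑定两行数据
  int64_t ts[2] = {1626861392589LL, 1626861392590LL};
  int32_t temperature[2] = {20, 21};
  char is_null[2] = {0, 0};
  TAOS_MULTI_BIND cols[2];
  memset(cols, 0, sizeof(cols));
  cols[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
  cols[0].buffer = ts;
  cols[0].buffer_length = sizeof(int64_t);
  cols[0].is_null = is_null;
  cols[0].num = 2;
  cols[1].buffer_type = TSDB_DATA_TYPE_INT;
  cols[1].buffer = temperature;
  cols[1].buffer_length = sizeof(int32_t);
  cols[1].is_null = is_null;
  cols[1].num = 2;
  taos_stmt_bind_param_batch(stmt, cols);

  // 6. 加入批处理(可重复第 3~6 步,为批处理加入更多数据行)
  taos_stmt_add_batch(stmt);

  // 8. 执行批处理;失败时可用 taos_stmt_errstr 获取错误信息(2.1.3.0 起提供)
  if (taos_stmt_execute(stmt) != 0) {
    printf("execute failed: %s\n", taos_stmt_errstr(stmt));
  }

  // 9. 释放所有资源
  taos_stmt_close(stmt);
}
```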
### 连续查询接口
TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时间段,对一张或多张数据库的表(数据流)进行各种实时聚合计算操作。操作简单,仅有打开、关闭流的API。具体如下:
......@@ -345,11 +395,11 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时
* taos:已经建立好的数据库连接
* sql:SQL查询语句(仅能使用查询语句)
* fp:用户定义的回调函数指针,每次流式计算完成后,TDengine将查询的结果(TAOS_ROW)、查询状态(TAOS_RES)、用户定义参数(PARAM)传递给回调函数,在回调函数内,用户可以使用taos_num_fields获取结果集列数,taos_fetch_fields获取结果集每列数据的类型。
* stime:是流式计算开始的时间,如果是0,表示从现在开始,如果不为零,表示从指定的时间开始计算(UTC时间从1970/1/1算起的毫秒数)
* stime:是流式计算开始的时间。如果是“64位整数最小值”,表示从现在开始;如果不为“64位整数最小值”,表示从指定的时间开始计算(UTC时间从1970/1/1算起的毫秒数)。
* param:是应用提供的用于回调的一个参数,回调时,提供给应用
* callback: 第二个回调函数,会在连续查询自动停止时被调用。
返回值为NULL,表示创建成功,返回值不为空,表示成功。
返回值为NULL,表示创建失败;返回值不为空,表示成功。
- `void taos_close_stream (TAOS_STREAM *tstr)`
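下面是一段最小示例草图,演示连续查询接口的基本用法(仅作示意:假设连接 `taos` 已建立、表 `meters` 已存在,各参数的确切含义请以 taos.h 头文件说明为准):
```c
#include <stdint.h>
#include <stdio.h>
#include "taos.h"

// 每个时间窗口计算完成后,TDengine 调用一次该回调
static void stream_cb(void *param, TAOS_RES *res, TAOS_ROW row) {
  char buf[1024] = {0};
  taos_print_row(buf, row, taos_fetch_fields(res), taos_num_fields(res));
  printf("stream result: %s\n", buf);
}

void demo_stream(TAOS *taos) {
  // stime 取 64 位整数最小值,表示“从现在开始”计算
  TAOS_STREAM *stream = taos_open_stream(taos,
      "select avg(current) from meters interval(10s)",
      stream_cb, INT64_MIN, NULL, NULL);
  if (stream == NULL) {
    printf("failed to open stream\n");
    return;
  }
  // ...应用退出前调用 taos_close_stream(stream) 关闭流
}
```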
......@@ -377,6 +427,7 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时
* res:查询结果集,注意结果集中可能没有记录
* param:调用 `taos_subscribe`时客户程序提供的附加参数
* code:错误码
**注意**:在这个回调函数里不可以做耗时过长的处理,尤其是对于返回的结果集中数据较多的情况,否则有可能导致客户端阻塞等异常状态。如果必须进行复杂计算,则建议在另外的线程中进行处理。
* `TAOS_RES *taos_consume(TAOS_SUB *tsub)`
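下面是一段最小示例草图,演示同步模式的订阅用法(仅作示意:假设连接 `taos` 已建立、表 `meters` 已存在;restart、轮询间隔等参数的确切含义请以 taos.h 头文件为准):
```c
#include <stdio.h>
#include <unistd.h>
#include "taos.h"

void demo_subscribe(TAOS *taos) {
  // 同步模式:回调传 NULL,由应用主动调用 taos_consume 拉取数据
  TAOS_SUB *tsub = taos_subscribe(taos, 1, "demo_topic",
      "select * from meters", NULL, NULL, 1000);
  if (tsub == NULL) return;

  for (int i = 0; i < 10; i++) {
    TAOS_RES *res = taos_consume(tsub);  // 拉取自上次以来新到达的数据
    TAOS_ROW row;
    while (res != NULL && (row = taos_fetch_row(res)) != NULL) {
      char buf[1024] = {0};
      taos_print_row(buf, row, taos_fetch_fields(res), taos_num_fields(res));
      printf("new row: %s\n", buf);
    }
    // taos_consume 返回的结果集由订阅对象管理,通常无需单独释放(以头文件说明为准)
    sleep(1);
  }
  // 第二个参数决定是否保留订阅进度(具体取值以头文件说明为准)
  taos_unsubscribe(tsub, 0);
}
```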
......@@ -515,7 +566,7 @@ conn.close()
- _TDengineCursor_ 类
参考python中help(taos.TDengineCursor)。
这个类对应客户端进行的写入、查询操作。在客户端多线程的场景下,这个游标实例必须保持线程独享,不能线程共享使用,否则会导致返回结果出现错误。
这个类对应客户端进行的写入、查询操作。在客户端多线程的场景下,这个游标实例必须保持线程独享,不能线程共享使用,否则会导致返回结果出现错误。
- _connect_ 方法
......@@ -534,7 +585,9 @@ conn.close()
## <a class="anchor" id="restful"></a>RESTful Connector
为支持各种不同类型平台的开发,TDengine提供符合REST设计标准的API,即RESTful API。为最大程度降低学习成本,不同于其他数据库RESTful API的设计方法,TDengine直接通过HTTP POST 请求BODY中包含的SQL语句来操作数据库,仅需要一个URL。RESTful连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1965.html)
为支持各种不同类型平台的开发,TDengine 提供符合 REST 设计标准的 API,即 RESTful API。为最大程度降低学习成本,不同于其他数据库 RESTful API 的设计方法,TDengine 直接通过 HTTP POST 请求 BODY 中包含的 SQL 语句来操作数据库,仅需要一个 URL。RESTful 连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1965.html)
注意:与标准连接器的一个区别是,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,所有对表名、超级表名的引用都需要指定数据库名前缀。
### HTTP请求格式
......@@ -738,7 +791,7 @@ HTTP请求URL采用`sqlutc`时,返回结果集的时间戳将采用UTC时间
下面仅列出一些与RESTful接口有关的配置参数,其他系统参数请看配置文件里的说明。注意:配置修改后,需要重启taosd服务才能生效
- httpPort: 对外提供RESTful服务的端口号,默认绑定到6041
- 对外提供RESTful服务的端口号,默认绑定到 6041(实际取值是 serverPort + 11,因此可以通过修改 serverPort 参数的设置来修改)
- httpMaxThreads: 启动的线程数量,默认为2(2.0.17版本开始,默认值改为CPU核数的一半向下取整)
- restfulRowLimit: 返回结果集(JSON格式)的最大条数,默认值为10240
- httpEnableCompress: 是否支持压缩,默认不支持,目前TDengine仅支持gzip压缩格式
......@@ -848,7 +901,7 @@ go env -w GOPROXY=https://goproxy.io,direct
Node.js连接器支持的系统有:
| **CPU类型** | x64(64bit) | | | aarch64 | aarch32 |
|**CPU类型** | x64(64bit) | | | aarch64 | aarch32 |
| ------------ | ------------ | -------- | -------- | -------- | -------- |
| **OS类型** | Linux | Win64 | Win32 | Linux | Linux |
| **支持与否** | **支持** | **支持** | **支持** | **支持** | **支持** |
......
......@@ -16,7 +16,7 @@ TDengine的Grafana插件在安装包的/usr/local/taos/connector/grafanaplugin
以CentOS 7.2操作系统为例,将grafanaplugin目录拷贝到/var/lib/grafana/plugins目录下,重新启动grafana即可。
```bash
sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/tdengine
sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine
```
### 使用 Grafana
......@@ -75,50 +75,45 @@ sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/tdengine
![img](page://images/connections/import_dashboard2.jpg)
## <a class="anchor" id="matlab"></a>Matlab
## <a class="anchor" id="matlab"></a>MATLAB
MatLab可以通过安装包内提供的JDBC Driver直接连接到TDengine获取数据到本地工作空间。
MATLAB 可以通过安装包内提供的 JDBC Driver 直接连接到 TDengine 获取数据到本地工作空间。
### MatLab的JDBC接口适配
### MATLAB 的 JDBC 接口适配
MatLab的适配有下面几个步骤,下面以Windows10上适配MatLab2017a为例:
MATLAB 的适配有下面几个步骤,下面以 Windows 10 上适配 MATLAB2021a 为例:
- 将TDengine安装包内的驱动程序JDBCDriver-1.0.0-dist.jar拷贝到${matlab_root}\MATLAB\R2017a\java\jar\toolbox
- 将TDengine安装包内的taos.lib文件拷贝至${matlab_ root _dir}\MATLAB\R2017a\lib\win64
- 将新添加的驱动jar包加入MatLab的classpath。在${matlab_ root _dir}\MATLAB\R2017a\toolbox\local\classpath.txt文件中添加下面一行
- 将 TDengine 客户端安装路径下 `\TDengine\connector\jdbc` 目录中的驱动程序 `taos-jdbcdriver-2.0.25-dist.jar` 拷贝到 `${matlab_root}\MATLAB\R2021a\java\jar\toolbox`
- 将 TDengine 安装包内的 `taos.lib` 文件拷贝至 `${matlab_root_dir}\MATLAB\R2021a\lib\win64`
- 将新添加的驱动 jar 包加入 MATLAB 的 classpath。在 `${matlab_root_dir}\MATLAB\R2021a\toolbox\local\classpath.txt` 文件中添加下面一行:
```
$matlabroot/java/jar/toolbox/JDBCDriver-1.0.0-dist.jar
$matlabroot/java/jar/toolbox/taos-jdbcdriver-2.0.25-dist.jar
```
- 在${user_home}\AppData\Roaming\MathWorks\MATLAB\R2017a\下添加一个文件javalibrarypath.txt, 并在该文件中添加taos.dll的路径,比如您的taos.dll是在安装时拷贝到了C:\Windows\System32下,那么就应该在javalibrarypath.txt中添加如下一行:
-`${user_home}\AppData\Roaming\MathWorks\MATLAB\R2021a\` 下添加一个文件 `javalibrarypath.txt`,并在该文件中添加 taos.dll 的路径,比如您的 taos.dll 是在安装时拷贝到了 `C:\Windows\System32` 下,那么就应该在 `javalibrarypath.txt` 中添加如下一行:
```
C:\Windows\System32
```
### 在MatLab中连接TDengine获取数据
### 在 MATLAB 中连接 TDengine 获取数据
在成功进行了上述配置后,打开MatLab
在成功进行了上述配置后,打开 MATLAB
- 创建一个连接:
```matlab
conn = database(db, root, taosdata, com.taosdata.jdbc.TSDBDriver, jdbc:TSDB://127.0.0.1:0/)
conn = database('test', 'root', 'taosdata', 'com.taosdata.jdbc.TSDBDriver', 'jdbc:TSDB://192.168.1.94:6030/')
```
- 执行一次查询:
```matlab
sql0 = ['select * from tb']
data = select(conn, sql0);
```
- 插入一条记录:
```matlab
sql1 = ['insert into tb values (now, 1)']
exec(conn, sql1)
```
更多例子细节请参考安装包内examples\Matlab\TDengineDemo.m文件。
更多例子细节请参考安装包内 `examples\Matlab\TDengineDemo.m` 文件。
## <a class="anchor" id="r"></a>R
......
......@@ -55,12 +55,11 @@ arbitrator ha.taosdata.com:6042
| 4 | statusInterval | dnode向mnode报告状态时长 |
| 5 | arbitrator | 系统中裁决器的end point |
| 6 | timezone | 时区 |
| 7 | locale | 系统区位信息及编码格式 |
| 8 | charset | 字符集编码 |
| 9 | balance | 是否启动负载均衡 |
| 10 | maxTablesPerVnode | 每个vnode中能够创建的最大表个数 |
| 11 | maxVgroupsPerDb | 每个DB中能够使用的最大vgroup个数 |
| 7 | balance | 是否启动负载均衡 |
| 8 | maxTablesPerVnode | 每个vnode中能够创建的最大表个数 |
| 9 | maxVgroupsPerDb | 每个DB中能够使用的最大vgroup个数 |
备注:在 2.0.19.0 及更早的版本中,除以上 9 项参数外,dnode 加入集群时,还会要求 locale 和 charset 参数的取值也一致。
## <a class="anchor" id="node-one"></a>启动第一个数据节点
......
......@@ -99,9 +99,8 @@ taosd -C
下面仅仅列出一些重要的配置参数,更多的参数请看配置文件里的说明。各个参数的详细介绍及作用请看前述章节,而且这些参数的缺省配置都是工作的,一般无需设置。**注意:配置修改后,需要重启*taosd*服务才能生效。**
- firstEp: taosd启动时,主动连接的集群中首个dnode的end point, 默认值为localhost:6030。
- fqdn:数据节点的FQDN,缺省为操作系统配置的第一个hostname。如果习惯IP地址访问,可设置为该节点的IP地址。
- serverPort:taosd启动后,对外服务的端口号,默认值为6030。
- httpPort: RESTful服务使用的端口号,所有的HTTP请求(TCP)都需要向该接口发起查询/写入请求, 默认值为6041。
- fqdn:数据节点的FQDN,缺省为操作系统配置的第一个hostname。如果习惯IP地址访问,可设置为该节点的IP地址。这个参数值的长度需要控制在 96 个字符以内。
- serverPort:taosd启动后,对外服务的端口号,默认值为6030。(RESTful服务使用的端口号是在此基础上+11,即默认值为6041。)
- dataDir: 数据文件目录,所有的数据文件都将写入该目录。默认值:/var/lib/taos。
- logDir:日志文件目录,客户端和服务器的运行日志文件将写入该目录。默认值:/var/log/taos。
- arbitrator:系统中裁决器的end point, 缺省值为空。
......@@ -115,22 +114,24 @@ taosd -C
- queryBufferSize: 为所有并发查询占用保留的内存大小。计算规则可以根据实际应用可能的最大并发数和表的数字相乘,再乘 170 。单位为 MB(2.0.15 以前的版本中,此参数的单位是字节)。
- ratioOfQueryCores: 设置查询线程的最大数量。最小值0 表示只有1个查询线程;最大值2表示最大建立2倍CPU核数的查询线程。默认为1,表示最大和CPU核数相等的查询线程。该值可以为小数,即0.5表示最大建立CPU核数一半的查询线程。
**注意:**对于端口,TDengine会使用从serverPort起13个连续的TCP和UDP端口号,请务必在防火墙打开。因此如果是缺省配置,需要打开从6030到6042共13个端口,而且必须TCP和UDP都打开。
**注意:**对于端口,TDengine会使用从serverPort起13个连续的TCP和UDP端口号,请务必在防火墙打开。因此如果是缺省配置,需要打开从6030到6042共13个端口,而且必须TCP和UDP都打开。(详细的端口情况请参见 [TDengine 2.0 端口说明](https://www.taosdata.com/cn/documentation/faq#port))
不同应用场景的数据往往具有不同的数据特征,比如保留天数、副本数、采集频次、记录大小、采集点的数量、压缩等都可完全不同。为获得在存储上的最高效率,TDengine提供如下存储相关的系统配置参数:
不同应用场景的数据往往具有不同的数据特征,比如保留天数、副本数、采集频次、记录大小、采集点的数量、压缩等都可完全不同。为获得在存储上的最高效率,TDengine提供如下存储相关的系统配置参数(既可以作为 create database 指令的参数,也可以写在 taos.cfg 配置文件中用来设定创建新数据库时所采用的默认值)
- days:一个数据文件存储数据的时间跨度单位为天,默认值:10。
- keep:数据库中数据保留的天数单位为天,默认值:3650。(可通过 alter database 修改)
- minRows:文件块中记录的最小条数单位为条,默认值:100。
- maxRows:文件块中记录的最大条数单位为条,默认值:4096。
- comp:文件压缩标志位0:关闭;1:一阶段压缩;2:两阶段压缩。默认值:2。(可通过 alter database 修改)
- walLevel:WAL级别。1:写wal,但不执行fsync;2:写wal, 而且执行fsync。默认值:1。
- fsync:当wal设置为2时,执行fsync的周期。设置为0,表示每次写入,立即执行fsync。单位为毫秒,默认值:3000。
- cache:内存块的大小单位为兆字节(MB),默认值:16。
- days:一个数据文件存储数据的时间跨度,单位为天,默认值:10。
- keep:数据库中数据保留的天数,单位为天,默认值:3650。(可通过 alter database 修改)
- minRows:文件块中记录的最小条数,单位为条,默认值:100。
- maxRows:文件块中记录的最大条数,单位为条,默认值:4096。
- comp:文件压缩标志位,0:关闭;1:一阶段压缩;2:两阶段压缩。默认值:2。(可通过 alter database 修改)
- wal:WAL级别。1:写wal,但不执行fsync;2:写wal,而且执行fsync。默认值:1。(在 taos.cfg 中参数名需要写作 walLevel)(可通过 alter database 修改)
- fsync:当wal设置为2时,执行fsync的周期。设置为0,表示每次写入,立即执行fsync。单位为毫秒,默认值:3000。(可通过 alter database 修改)
- cache:内存块的大小,单位为兆字节(MB),默认值:16。
- blocks:每个VNODE(TSDB)中有多少cache大小的内存块。因此一个VNODE的用的内存大小粗略为(cache * blocks)。单位为块,默认值:4。(可通过 alter database 修改)
- replica:副本个数,取值范围:1-3。单位为个,默认值:1。(可通过 alter database 修改)
- precision:时间戳精度标识,ms表示毫秒,us表示微秒。默认值:ms。
- cacheLast:是否在内存中缓存子表 last_row,0:关闭;1:开启。默认值:0。(可通过 alter database 修改)(从 2.0.11 版本开始支持此参数)
- replica:副本个数。取值范围:1-3,单位为个,默认值:1。(可通过 alter database 修改)
- quorum:多副本环境下指令执行的确认数要求。取值范围:1、2,单位为个,默认值:1。(可通过 alter database 修改)
- precision:时间戳精度标识。ms表示毫秒,us表示微秒,默认值:ms。(2.1.2.0 版本之前、2.0.20.7 版本之前在 taos.cfg 文件中不支持此参数。)
- cacheLast:是否在内存中缓存子表的最近数据。0:关闭;1:缓存子表最近一行数据;2:缓存子表每一列的最近的非NULL值;3:同时打开缓存最近行和列功能。默认值:0。(可通过 alter database 修改)(从 2.1.2.0 版本开始此参数支持 0~3 的取值范围,在此之前取值只能是 [0, 1];而 2.0.11.0 之前的版本在 SQL 指令中不支持此参数。)(2.1.2.0 版本之前、2.0.20.7 版本之前在 taos.cfg 文件中不支持此参数。)
- update:是否允许更新。0:不允许;1:允许。默认值:0。(可通过 alter database 修改)
对于一个应用场景,可能有多种数据特征的数据并存,最佳的设计是将具有相同数据特征的表放在一个库里,这样一个应用有多个库,而每个库可以配置不同的存储参数,从而保证系统有最优的性能。TDengine允许应用在创建库时指定上述存储参数,如果指定,该参数就将覆盖对应的系统配置参数。举例,有下述SQL:
......@@ -143,14 +144,17 @@ taosd -C
TDengine集群中加入一个新的dnode时,涉及集群相关的一些参数必须与已有集群的配置相同,否则不能成功加入到集群中。会进行校验的参数如下:
- numOfMnodes:系统中管理节点个数。默认值:3。
- balance:是否启动负载均衡。0:否,1:是。默认值:1。
- mnodeEqualVnodeNum: 一个mnode等同于vnode消耗的个数。默认值:4。
- offlineThreshold: dnode离线阈值,超过该时间将导致该dnode从集群中删除。单位为秒,默认值:86400*10(即10天)。
- statusInterval: dnode向mnode报告状态时长。单位为秒,默认值:1。
- maxTablesPerVnode: 每个vnode中能够创建的最大表个数。默认值:1000000。
- maxVgroupsPerDb: 每个数据库中能够使用的最大vgroup个数。
- arbitrator: 系统中裁决器的end point,缺省为空。
- timezone、locale、charset 的配置见客户端配置。
- timezone、locale、charset 的配置见客户端配置。(2.0.20.0 及以上的版本里,集群中加入新节点已不要求 locale 和 charset 参数取值一致)
- balance:是否启用负载均衡。0:否,1:是。默认值:1。
- flowctrl:是否启用非阻塞流控。0:否,1:是。默认值:1。
- slaveQuery:是否启用 slave vnode 参与查询。0:否,1:是。默认值:1。
- adjustMaster:是否启用 vnode master 负载均衡。0:否,1:是。默认值:1。
为方便调试,可通过SQL语句临时调整每个dnode的日志配置,系统重启后会失效:
......@@ -445,7 +449,7 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下
- 数据库名:不能包含“.”以及特殊字符,不能超过 32 个字符
- 表名:不能包含“.”以及特殊字符,与所属数据库名一起,不能超过 192 个字符
- 表的列名:不能包含特殊字符,不能超过 64 个字符
- 数据库名、表名、列名,都不能以数字开头
- 数据库名、表名、列名,都不能以数字开头,合法的可用字符集是“英文字符、数字和下划线”
- 表的列数:不能超过 1024 列
- 记录的最大长度:包括时间戳 8 byte,不能超过 16KB(每个 BINARY/NCHAR 类型的列还会额外占用 2 个 byte 的存储位置)
- 单条 SQL 语句默认最大字符串长度:65480 byte
......@@ -463,41 +467,41 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下
| 关键字列表 | | | | |
| ---------- | ----------- | ------------ | ---------- | --------- |
| ABLOCKS | CONNECTIONS | GT | MNODES | SLIDING |
| ABORT | COPY | ID | MODULES | SLIMIT |
| ACCOUNT | COUNT | IF | NCHAR | SMALLINT |
| ACCOUNTS | CREATE | IGNORE | NE | SPREAD |
| ADD | CTIME | IMMEDIATE | NONE | STABLE |
| AFTER | DATABASE | IMPORT | NOT | STABLES |
| ALL | DATABASES | IN | NOTNULL | STAR |
| ALTER | DAYS | INITIALLY | NOW | STATEMENT |
| AND | DEFERRED | INSERT | OF | STDDEV |
| AS | DELIMITERS | INSTEAD | OFFSET | STREAM |
| ASC | DESC | INTEGER | OR | STREAMS |
| ATTACH | DESCRIBE | INTERVAL | ORDER | STRING |
| AVG | DETACH | INTO | PASS | SUM |
| BEFORE | DIFF | IP | PERCENTILE | TABLE |
| BEGIN | DISTINCT | IS | PLUS | TABLES |
| BETWEEN | DIVIDE | ISNULL | PRAGMA | TAG |
| BIGINT | DNODE | JOIN | PREV | TAGS |
| BINARY | DNODES | KEEP | PRIVILEGE | TBLOCKS |
| BITAND | DOT | KEY | QUERIES | TBNAME |
| BITNOT | DOUBLE | KILL | QUERY | TIMES |
| BITOR | DROP | LAST | RAISE | TIMESTAMP |
| BOOL | EACH | LE | REM | TINYINT |
| BOTTOM | END | LEASTSQUARES | REPLACE | TOP |
| BY | EQ | LIKE | REPLICA | TRIGGER |
| CACHE | EXISTS | LIMIT | RESET | UMINUS |
| CASCADE | EXPLAIN | LINEAR | RESTRICT | UPLUS |
| CHANGE | FAIL | LOCAL | ROW | USE |
| CLOG | FILL | LP | ROWS | USER |
| CLUSTER | FIRST | LSHIFT | RP | USERS |
| COLON | FLOAT | LT | RSHIFT | USING |
| COLUMN | FOR | MATCH | SCORES | VALUES |
| COMMA | FROM | MAX | SELECT | VARIABLE |
| COMP | GE | METRIC | SEMI | VGROUPS |
| CONCAT | GLOB | METRICS | SET | VIEW |
| CONFIGS | GRANTS | MIN | SHOW | WAVG |
| CONFLICT | GROUP | MINUS | SLASH | WHERE |
| CONNECTION | | | | |
| ABLOCKS | CONNECTIONS | HAVING | MODULES | SMALLINT |
| ABORT | COPY | ID | NCHAR | SPREAD |
| ACCOUNT | COUNT | IF | NE | STABLE |
| ACCOUNTS | CREATE | IGNORE | NONE | STABLES |
| ADD | CTIME | IMMEDIATE | NOT | STAR |
| AFTER | DATABASE | IMPORT | NOTNULL | STATEMENT |
| ALL | DATABASES | IN | NOW | STDDEV |
| ALTER | DAYS | INITIALLY | OF | STREAM |
| AND | DEFERRED | INSERT | OFFSET | STREAMS |
| AS | DELIMITERS | INSTEAD | OR | STRING |
| ASC | DESC | INTEGER | ORDER | SUM |
| ATTACH | DESCRIBE | INTERVAL | PASS | TABLE |
| AVG | DETACH | INTO | PERCENTILE | TABLES |
| BEFORE | DIFF | IP | PLUS | TAG |
| BEGIN | DISTINCT | IS | PRAGMA | TAGS |
| BETWEEN | DIVIDE | ISNULL | PREV | TBLOCKS |
| BIGINT | DNODE | JOIN | PRIVILEGE | TBNAME |
| BINARY | DNODES | KEEP | QUERIES | TIMES |
| BITAND | DOT | KEY | QUERY | TIMESTAMP |
| BITNOT | DOUBLE | KILL | RAISE | TINYINT |
| BITOR | DROP | LAST | REM | TOP |
| BOOL | EACH | LE | REPLACE | TOPIC |
| BOTTOM | END | LEASTSQUARES | REPLICA | TRIGGER |
| BY | EQ | LIKE | RESET | UMINUS |
| CACHE | EXISTS | LIMIT | RESTRICT | UNION |
| CASCADE | EXPLAIN | LINEAR | ROW | UPLUS |
| CHANGE | FAIL | LOCAL | ROWS | USE |
| CLOG | FILL | LP | RP | USER |
| CLUSTER | FIRST | LSHIFT | RSHIFT | USERS |
| COLON | FLOAT | LT | SCORES | USING |
| COLUMN | FOR | MATCH | SELECT | VALUES |
| COMMA | FROM | MAX | SEMI | VARIABLE |
| COMP | GE | METRIC | SET | VGROUPS |
| CONCAT | GLOB | METRICS | SHOW | VIEW |
| CONFIGS | GRANTS | MIN | SLASH | WAVG |
| CONFLICT | GROUP | MINUS | SLIDING | WHERE |
| CONNECTION | GT | MNODES | SLIMIT | |
......@@ -26,17 +26,17 @@
## 2. Windows平台下JDBCDriver找不到动态链接库,怎么办?
请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/03/jdbcdriver找不到动态链接库/)
请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/03/950.html)
## 3. 创建数据表时提示more dnodes are needed
请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/03/创建数据表时提示more-dnodes-are-needed/)
请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/03/965.html)
## 4. 如何让TDengine crash时生成core文件?
请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/06/tdengine-crash时生成core文件的方法/)
请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/06/974.html)
## 5. 遇到错误"Unable to establish connection", 我怎么办?
## 5. 遇到错误“Unable to establish connection”, 我怎么办?
客户端遇到连接故障,请按照下面的步骤进行检查:
......@@ -51,13 +51,13 @@
4. 确认客户端连接时指定了正确的服务器FQDN (Fully Qualified Domain Name(可在服务器上执行Linux命令hostname -f获得)),FQDN配置参考:[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)
5. ping服务器FQDN,如果没有反应,请检查你的网络,DNS设置,或客户端所在计算机的系统hosts文件
5. ping服务器FQDN,如果没有反应,请检查你的网络,DNS设置,或客户端所在计算机的系统hosts文件。如果部署的是TDengine集群,客户端需要能ping通所有集群节点的FQDN。
6. 检查防火墙设置(Ubuntu 使用 ufw status,CentOS 使用 firewall-cmd --list-ports),确认TCP/UDP 端口6030-6042 是打开的
7. 对于Linux上的JDBC(ODBC, Python, Go等接口类似)连接, 确保*libtaos.so*在目录*/usr/local/taos/driver*里, 并且*/usr/local/taos/driver*在系统库函数搜索路径*LD_LIBRARY_PATH*
8. 对于windows上的JDBC, ODBC, Python, Go等连接,确保*C:\TDengine\driver\taos.dll*在你的系统库函数搜索目录里 (建议*taos.dll*放在目录 *C:\Windows\System32*)
8. 对于Windows上的JDBC, ODBC, Python, Go等连接,确保*C:\TDengine\driver\taos.dll*在你的系统库函数搜索目录里 (建议*taos.dll*放在目录 *C:\Windows\System32*)
9. 如果仍不能排除连接故障
......@@ -70,7 +70,8 @@
10. 也可以使用taos程序内嵌的网络连通检测功能,来验证服务器和客户端之间指定的端口连接是否通畅(包括TCP和UDP):[TDengine 内嵌网络检测工具使用指南](https://www.taosdata.com/blog/2020/09/08/1816.html)
## 6. 遇到错误“Unexpected generic error in RPC”或者"TDengine Error: Unable to resolve FQDN", 我怎么办?
## 6. 遇到错误“Unexpected generic error in RPC”或者“Unable to resolve FQDN”,我怎么办?
产生这个错误,是由于客户端或数据节点无法解析FQDN(Fully Qualified Domain Name)导致。对于TAOS Shell或客户端应用,请做如下检查:
1. 请检查连接的服务器的FQDN是否正确,FQDN配置参考:[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)
......@@ -102,7 +103,7 @@ TDengine 目前尚不支持删除功能,未来根据用户需求可能会支
批量插入。每条写入语句可以一张表同时插入多条记录,也可以同时插入多张表的多条记录。
## 12. 最有效的写入数据的方法是什么?windows系统下插入的nchar类数据中的汉字被解析成了乱码如何解决?
## 12. Windows系统下插入的nchar类数据中的汉字被解析成了乱码如何解决?
Windows下插入nchar类的数据中如果有中文,请先确认系统的地区设置成了中国(在Control Panel里可以设置),这时cmd中的`taos`客户端应该已经可以正常工作了;如果是在IDE里开发Java应用,比如Eclipse, Intellij,请确认IDE里的文件编码为GBK(这是Java默认的编码类型),然后在生成Connection时,初始化客户端的配置,具体语句如下:
```JAVA
......@@ -115,15 +116,15 @@ Connection = DriverManager.getConnection(url, properties);
## 13. JDBC报错: the excuted SQL is not a DML or a DDL?
请更新至最新的JDBC驱动
```JAVA
```xml
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.4</version>
<version>2.0.27</version>
</dependency>
```
## 14. taos connect failed, reason: invalid timestamp
## 14. taos connect failed, reason&#58; invalid timestamp
常见原因是服务器和客户端时间没有校准,可以通过和时间服务器同步的方式(Linux 下使用 ntpdate 命令,Windows 在系统时间设置中选择自动同步)校准。
......@@ -157,7 +158,8 @@ ALTER LOCAL RESETLOG;
其含义是,清空本机所有由客户端生成的日志文件。
## <a class="anchor" id="timezone"></a>18. 时间戳的时区信息是怎样处理的?
<a class="anchor" id="timezone"></a>
## 18. 时间戳的时区信息是怎样处理的?
TDengine 中时间戳的时区总是由客户端进行处理,而与服务端无关。具体来说,客户端会对 SQL 语句中的时间戳进行时区转换,转为 UTC 时区(即 Unix 时间戳——Unix Timestamp)再交由服务端进行写入和查询;在读取数据时,服务端也是采用 UTC 时区提供原始数据,客户端收到后再根据本地设置,把时间戳转换为本地系统所要求的时区进行显示。
......@@ -166,3 +168,19 @@ TDengine 中时间戳的时区总是由客户端进行处理,而与服务端
2. 如果在 taos.cfg 中设置了 timezone 参数,则客户端会以这个配置文件中的设置为准。
3. 如果在 C/C++/Java/Python 等各种编程语言的 Connector Driver 中,在建立数据库连接时显式指定了 timezone,那么会以这个指定的时区设置为准。例如 Java Connector 的 JDBC URL 中就有 timezone 参数。
4. 在书写 SQL 语句时,也可以直接使用 Unix 时间戳(例如 `1554984068000`)或带有时区的时间戳字符串,也即以 RFC 3339 格式(例如 `2013-04-12T15:52:01.123+08:00`)或 ISO-8601 格式(例如 `2013-04-12T15:52:01.123+0800`)来书写时间戳,此时这些时间戳的取值将不再受其他时区设置的影响。
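例如,下面是一段最小示例草图,展示如何在 C 连接器中于建立连接之前通过 `taos_options` 显式指定客户端时区(仅作示意,选项枚举名与时区取值格式请以 taos.h 及官方文档为准):
```c
#include "taos.h"

int main() {
  // 必须在建立第一个连接之前调用;若不调用,
  // 客户端将按操作系统或 taos.cfg 中的时区设置处理
  taos_options(TSDB_OPTION_TIMEZONE, "UTC-8");
  taos_init();
  TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  // ...此后 SQL 中书写的时间戳字符串将按上面指定的时区转换为 Unix 时间戳
  if (taos != NULL) taos_close(taos);
  taos_cleanup();
  return 0;
}
```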
<a class="anchor" id="port"></a>
## 19. TDengine 都会用到哪些网络端口?
在 TDengine 2.0 版本中,会用到以下这些网络端口(以默认端口 6030 为前提进行说明,如果修改了配置文件中的设置,那么这里列举的端口都会出现变化),管理员可以参考这里的信息调整防火墙设置:
| 协议 | 默认端口 | 用途说明 | 修改方法 |
| :--- | :-------- | :---------------------------------- | :------------------------------- |
| TCP | 6030 | 客户端与服务端之间通讯。 | 由配置文件设置 serverPort 决定。 |
| TCP | 6035 | 多节点集群的节点间通讯。 | 随 serverPort 端口变化。 |
| TCP | 6040 | 多节点集群的节点间数据同步。 | 随 serverPort 端口变化。 |
| TCP | 6041 | 客户端与服务端之间的 RESTful 通讯。 | 随 serverPort 端口变化。 |
| TCP | 6042 | Arbitrator 的服务端口。 | 因 Arbitrator 启动参数设置变化。 |
| TCP | 6060 | 企业版内 Monitor 服务的网络端口。 | |
| UDP | 6030-6034 | 客户端与服务端之间通讯。 | 随 serverPort 端口变化。 |
| UDP | 6035-6039 | 多节点集群的节点间通讯。 | 随 serverPort 端口变化。 |
# TDengine Documentation
TDengine is a highly efficient platform to store, query, and analyze time-series data. It is specially designed and optimized for IoT, Internet of Vehicles, Industrial IoT, IT Infrastructure and Application Monitoring, etc. It works like a relational database, such as MySQL, but you are strongly encouraged to read through the following documentation before you experience it, especially the Data Model and Data Modeling sections. In addition to this document, you should also download and read our technology white paper. For the older TDengine version 1.6 documentation, please click here.
## [TDengine Introduction](/evaluation)
* [TDengine Introduction and Features](/evaluation#intro)
* [TDengine Use Scenes](/evaluation#scenes)
* [TDengine Performance Metrics and Verification](/evaluation#)
## [Getting Started](/getting-started)
* [Quickly Install](/getting-started#install): install via source code/package/Docker within seconds
- [Easy to Launch](/getting-started#start): start / stop TDengine with systemctl
- [Command-line](/getting-started#console) : an easy way to access TDengine server
- [Experience Lightning Speed](/getting-started#demo): running a demo, inserting/querying data to experience faster speed
- [List of Supported Platforms](/getting-started#platforms): a list of platforms supported by TDengine server and client
- [Deploy to Kubernetes](https://taosdata.github.io/TDengine-Operator/en/index.html): a detailed guide for TDengine deployment in Kubernetes environment
## [Overall Architecture](/architecture)
- [Data Model](/architecture#model): relational database model, but one table for one device with static tags
- [Cluster and Primary Logical Unit](/architecture#cluster): Take advantage of NoSQL, support scale-out and high-reliability
- [Storage Model and Data Partitioning/Sharding](/architecture#sharding): tag data will be separated from time-series data, segmented by vnode and time
- [Data Writing and Replication Process](/architecture#replication): records received are written to WAL, cached, and acknowledged back to the client, with multi-replica support
- [Caching and Persistence](/architecture#persistence): latest records are cached in memory, but are written in columnar format with an ultra-high compression ratio
- [Data Query](/architecture#query): support various functions, time-axis aggregation, interpolation, and multi-table aggregation
## [Data Modeling](/model)
- [Create a Database](/model#create-db): create a database for all data collection points with similar features
- [Create a Super Table (STable)](/model#create-stable): create a STable for all data collection points with the same type
- [Create a Table](/model#create-table): use a STable as the template to create a table for each data collection point
## [TAOS SQL](/taos-sql)
- [Data Types](/taos-sql#data-type): support timestamp, int, float, nchar, bool, and other types
- [Database Management](/taos-sql#management): add, drop, check databases
- [Table Management](/taos-sql#table): add, drop, check, alter tables
- [STable Management](/taos-sql#super-table): add, drop, check, alter STables
- [Tag Management](/taos-sql#tags): add, drop, alter tags
- [Inserting Records](/taos-sql#insert): support to write single/multiple items per table, multiple items across tables, and support to write historical data
- [Data Query](/taos-sql#select): support time segment, value filtering, sorting, manual paging of query results, etc
- [SQL Function](/taos-sql#functions): support various aggregation functions, selection functions, and calculation functions, such as avg, min, diff, etc
- [Time Dimensions Aggregation](/taos-sql#aggregation): aggregate and reduce the dimension after cutting table data by time segment
- [Boundary Restrictions](/taos-sql#limitation): restrictions for the library, table, SQL, and others
- [Error Code](/taos-sql/error-code): TDengine 2.0 error codes and corresponding decimal codes
## [Efficient Data Ingestion](/insert)
- [SQL Ingestion](/insert#sql): write one or multiple records into one or multiple tables via SQL insert command
- [Prometheus Ingestion](/insert#prometheus): Configure Prometheus to write data directly without any code
- [Telegraf Ingestion](/insert#telegraf): Configure Telegraf to write collected data directly without any code
- [EMQ X Broker](/insert#emq): Configure EMQ X to write MQTT data directly without any code
- [HiveMQ Broker](/insert#hivemq): Configure HiveMQ to write MQTT data directly without any code
## [Efficient Data Querying](/queries)
- [Main Query Features](/queries#queries): support various standard functions, setting filter conditions, and querying per time segment
- [Multi-table Aggregation Query](/queries#aggregation): use STable and set tag filter conditions to perform efficient aggregation queries
- [Downsampling to Query Value](/queries#sampling): aggregate data in successive time windows, support interpolation
## [Advanced Features](/advanced-features)
- [Continuous Query](/advanced-features#continuous-query): Based on sliding windows, the data stream is automatically queried and calculated at regular intervals
- [Data Publisher/Subscriber](/advanced-features#subscribe): subscribe to the newly arrived data like a typical messaging system
- [Cache](/advanced-features#cache): the newly arrived data of each device/table will always be cached
- [Alarm Monitoring](/advanced-features#alert): automatically monitor out-of-threshold data, and actively push it based-on configuration rules
## [Connector](/connector)
- [C/C++ Connector](/connector#c-cpp): primary method to connect to TDengine server through libtaos client library
- [Java Connector (JDBC)](/connector/java): driver for connecting to the server from Java applications using the JDBC API
- [Python Connector](/connector#python): driver for connecting to TDengine server from Python applications
- [RESTful Connector](/connector#restful): a simple way to interact with TDengine via HTTP
- [Go Connector](/connector#go): driver for connecting to TDengine server from Go applications
- [Node.js Connector](/connector#nodejs): driver for connecting to TDengine server from Node.js applications
- [C# Connector](/connector#csharp): driver for connecting to TDengine server from C# applications
- [Windows Client](https://www.taosdata.com/blog/2019/07/26/514.html): compile your own Windows client, which is required by various connectors on the Windows environment
## [Connections with Other Tools](/connections)
- [Grafana](/connections#grafana): query the data saved in TDengine and provide visualization
- [MATLAB](/connections#matlab): access data stored in TDengine server via JDBC configured within MATLAB
- [R](/connections#r): access data stored in TDengine server via JDBC configured within R
- [IDEA Database](https://www.taosdata.com/blog/2020/08/27/1767.html): use TDengine visually through IDEA Database Management Tool
## [Installation and Management of TDengine Cluster](/cluster)
- [Preparation](/cluster#prepare): important considerations before deploying TDengine for production usage
- [Create Your First Node](/cluster#node-one): simple to follow the quick setup
- [Create Subsequent Nodes](/cluster#node-other): configure taos.cfg for new nodes to add more to the existing cluster
- [Node Management](/cluster#management): add, delete, and check nodes in the cluster
- [High-availability of Vnode](/cluster#high-availability): implement high-availability of Vnode through multi-replicas
- [Mnode Management](/cluster#mnode): automatic system creation without any manual intervention
- [Load Balancing](/cluster#load-balancing): automatically performed once the number of nodes or load changes
- [Offline Node Processing](/cluster#offline): any node that offline for more than a certain period will be removed from the cluster
- [Arbitrator](/cluster#arbitrator): used in the case of an even number of replicas to prevent split-brain
## [TDengine Operation and Maintenance](/administrator)
- [Capacity Planning](/administrator#planning): Estimating hardware resources based on scenarios
- [Fault Tolerance and Disaster Recovery](/administrator#tolerance): set the correct WAL and number of data replicas
- [System Configuration](/administrator#config): port, cache size, file block size, and other system configurations
- [User Management](/administrator#user): add/delete TDengine users, modify user password
- [Import Data](/administrator#import): import data into TDengine from either script or CSV file
- [Export Data](/administrator#export): export data either from TDengine shell or from the taosdump tool
- [System Monitor](/administrator#status): monitor the system connections, queries, streaming calculation, logs, and events
- [File Directory Structure](/administrator#directories): directories where TDengine data files and configuration files are located
- [Parameter Restrictions and Reserved Keywords](/administrator#keywords): TDengine’s list of parameter restrictions and reserved keywords
## TDengine Technical Design
- [System Module]: the partitioning of taosd functions into modules
- [Data Replication]: support real-time synchronous/asynchronous replication, to ensure high-availability of the system
- [Technical Blog](https://www.taosdata.com/cn/blog/?categories=3): More technical analysis and architecture design articles
## Common Tools
- [TDengine sample import tools](https://www.taosdata.com/blog/2020/01/18/1166.html)
- [TDengine performance comparison test tools](https://www.taosdata.com/blog/2020/01/18/1166.html)
- [Use TDengine visually through IDEA Database Management Tool](https://www.taosdata.com/blog/2020/08/27/1767.html)
## Performance: TDengine vs Others
- [Performance: TDengine vs InfluxDB with InfluxDB’s open-source performance testing tool](https://www.taosdata.com/blog/2020/01/13/1105.html)
- [Performance: TDengine vs OpenTSDB](https://www.taosdata.com/blog/2019/08/21/621.html)
- [Performance: TDengine vs Cassandra](https://www.taosdata.com/blog/2019/08/14/573.html)
- [Performance: TDengine vs InfluxDB](https://www.taosdata.com/blog/2019/07/19/419.html)
- [Performance Test Reports of TDengine vs InfluxDB/OpenTSDB/Cassandra/MySQL/ClickHouse](https://www.taosdata.com/downloads/TDengine_Testing_Report_cn.pdf)
## More on IoT Big Data
- [Characteristics of IoT and Industry Internet Big Data](https://www.taosdata.com/blog/2019/07/09/characteristics-of-iot-big-data/)
- [Features and Functions of IoT Big Data platforms](https://www.taosdata.com/blog/2019/07/29/542.html)
- [Why don’t General Big Data Platforms Fit IoT Scenarios?](https://www.taosdata.com/blog/2019/07/09/why-does-the-general-big-data-platform-not-fit-iot-data-processing/)
- [Why TDengine is the best choice for IoT, Internet of Vehicles, and Industry Internet Big Data platforms?](https://www.taosdata.com/blog/2019/07/09/why-tdengine-is-the-best-choice-for-iot-big-data-processing/)
## FAQ
- [FAQ: Common questions and answers](/faq)
# TDengine Introduction
## <a class="anchor" id="intro"></a> About TDengine
TDengine is an innovative Big Data processing product launched by Taos Data in the face of the fast-growing Internet of Things (IoT) Big Data market and technical challenges. It does not rely on any third-party software, nor does it optimize or package any open-source database or stream computing product. Instead, it is a product independently developed after absorbing the advantages of many traditional relational databases, NoSQL databases, stream computing engines, message queues, and other software. TDengine has its own unique Big Data processing advantages in time-series space.
One of the modules of TDengine is the time-series database. However, in addition to this, to reduce the complexity of research and development and the difficulty of system operation, TDengine also provides functions such as caching, message queuing, subscription, stream computing, etc. TDengine provides a full-stack technical solution for the processing of IoT and Industrial Internet BigData. It is an efficient and easy-to-use IoT Big Data platform. Compared with typical Big Data platforms such as Hadoop, TDengine has the following distinct characteristics:
- **Performance improvement over 10 times**: An innovative data storage structure is defined, with which a single core can process at least 20,000 requests per second, insert millions of data points, and read more than 10 million data points, more than 10 times faster than existing general-purpose databases.
- **Reduce the cost of hardware or cloud services to 1/5**: Due to its ultra-high performance, TDengine’s computing resource consumption is less than 1/5 of other common Big Data solutions; through columnar storage and advanced compression algorithms, the storage consumption is less than 1/10 of other general databases.
- **Full-stack time-series data processing engine**: It integrates database, message queue, cache, stream computing, and other functions, so applications do not need to integrate software such as Kafka/Redis/HBase/Spark/HDFS, which greatly reduces the complexity and cost of application development and maintenance.
- **Powerful analysis functions**: Data from ten years ago or one second ago can all be queried based on a specified time range. Data can be aggregated on a timeline or across multiple devices. Ad-hoc queries can be made at any time through Shell, Python, R, and MATLAB.
- **Seamless connection with third-party tools**: Integration with Telegraf, Grafana, EMQ, HiveMQ, Prometheus, MATLAB, R, etc. without a single line of code. OPC, Hadoop, Spark, etc. will be supported in the future, and more BI tools will be seamlessly connected to.
- **Zero operation cost & zero learning cost**: Installing clusters is simple and quick, with real-time backup built-in, and no need to split libraries or tables. TDengine supports SQL similar to standard SQL and MySQL, plus RESTful interfaces and Python/Java/C/C++/C#/Go/Node.js connectors, so the learning cost is close to zero.
With TDengine, the total cost of ownership of typical IoT, Internet of Vehicles, and Industrial Internet Big Data platforms can be greatly reduced. However, it should be pointed out that because TDengine makes full use of the characteristics of IoT time-series data, it is not suitable for processing general data from web crawlers, microblogs, WeChat, e-commerce, ERP, CRM, and other sources.
![TDengine Technology Ecosystem](page://images/eco_system.png)
<center>Figure 1. TDengine Technology Ecosystem</center>
## <a class="anchor" id="scenes"></a>Overall Scenarios of TDengine
As an IoT Big Data platform, the typical application scenarios of TDengine are mainly presented in the IoT category, with users having a certain amount of data. The following sections of this document are mainly aimed at IoT-relevant systems. Other systems, such as CRM, ERP, etc., are beyond the scope of this article.
### Characteristics and Requirements of Data Sources
From the perspective of data sources, designers can analyze the applicability of TDengine in target application systems as follows.
| **Data Source Characteristics and Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| -------------------------------------------------------- | ------------------ | ----------------------- | ------------------- | :----------------------------------------------------------- |
| A huge amount of total data | | | √ | TDengine provides excellent scale-out functions in terms of capacity, and has a storage structure matching high compression ratio to achieve the best storage efficiency in the industry. |
| Data input velocity is occasionally or continuously huge | | | √ | TDengine's performance is much higher than other similar products. It can continuously process a large amount of input data in the same hardware environment, and provide a performance evaluation tool that can easily run in the user environment. |
| A huge amount of data sources | | | √ | TDengine is designed to include optimizations specifically for a huge amount of data sources, such as data writing and querying, which is especially suitable for efficiently processing massive (tens of millions or more) data sources. |
### System Architecture Requirements
| **System Architecture Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
| Require a simple and reliable system architecture | | | √ | TDengine's system architecture is very simple and reliable, with its own message queue, cache, stream computing, monitoring and other functions, and no need to integrate any additional third-party products. |
| Require fault-tolerance and high-reliability | | | √ | TDengine has cluster functions to automatically provide high-reliability functions such as fault tolerance and disaster recovery. |
| Standardization specifications | | | √ | TDengine uses the standard SQL language to provide its main functions and follows standardization specifications. |
### System Function Requirements
| **System Architecture Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
| Require complete built-in data processing algorithms | | √ | | TDengine implements various general data processing algorithms, but cannot cover the requirements of every industry, so special types of processing must be handled at the application level. |
| Require a huge amount of crosstab queries | | √ | | This type of processing should be handled more by relational database systems, or TDengine and relational database systems should fit together to implement system functions. |
### System Performance Requirements
| **System Architecture Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
| Require larger total processing capacity | | | √ | TDengine’s cluster functions can easily improve processing capacity via multi-server-cooperating. |
| Require high-speed data processing | | | √ | TDengine’s storage and data processing are optimized for IoT and generally process data several times faster than similar products. |
| Require fast processing of fine-grained data | | | √ | TDengine achieves the same level of performance as relational and NoSQL data processing systems. |
### System Maintenance Requirements
| **System Architecture Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
| Require system with high-reliability | | | √ | TDengine has a very robust and reliable system architecture to implement simple and convenient daily operation with streamlined experiences for operators, thus human errors and accidents are eliminated to the greatest extent. |
| Require controllable operation learning cost | | | √ | As above. |
| Require abundant talent supply | √ | | | As a new-generation product, it is still difficult to find talent with TDengine experience in the market. However, the learning cost is low, and as the vendor, we also provide extensive operation training and consulting services. |
# Quick Start
## <a class="anchor" id="install"></a>Quick Install
TDengine software consists of 3 parts: server, client, and alarm module. At the moment, the TDengine server only runs on Linux (support for Windows, macOS and other OSes will come soon), while the client can run on either Windows or Linux. Applications based on any OS can connect to the server taosd via the RESTful interface. As for CPUs, TDengine supports X64/ARM64/MIPS64/Alpha64 as well as ARM32 and RISC-V, and more CPU architectures will be supported soon. You can set up and install the TDengine server either from the [source code](https://www.taosdata.com/en/getting-started/#Install-from-Source) or the [packages](https://www.taosdata.com/en/getting-started/#Install-from-Package).
### <a class="anchor" id="source-install"></a>Install from Source
Please visit our [TDengine github page](https://github.com/taosdata/TDengine) for instructions on installation from the source code.
### Install from Docker Container
Please visit our [TDengine Official Docker Image: Distribution, Downloading, and Usage](https://www.taosdata.com/blog/2020/05/13/1509.html).
### <a class="anchor" id="package-install"></a>Install from Package
TDengine is extremely easy to install; it takes only a few seconds from download to successful installation. The server installation package includes clients and connectors. We provide 3 installation packages, which you can choose from according to your actual needs:
Click [here](https://www.taosdata.com/cn/getting-started/#%E9%80%9A%E8%BF%87%E5%AE%89%E8%A3%85%E5%8C%85%E5%AE%89%E8%A3%85) to download the install package.
For more about the installation process, please refer to [TDengine Installation Packages: Install and Uninstall](https://www.taosdata.com/blog/2019/08/09/566.html) and the [Video Tutorials](https://www.taosdata.com/blog/2020/11/11/1941.html).
## <a class="anchor" id="start"></a>Quick Launch
After installation, you can start the TDengine service by the `systemctl` command.
```bash
$ systemctl start taosd
```
Then check if the service is working now.
```bash
$ systemctl status taosd
```
If the service is running successfully, you can play around through TDengine shell `taos`.
**Note:**
- The `systemctl` command needs the **root** privilege. Use **sudo** if you are not the **root** user.
- To improve our product based on usage feedback, TDengine will collect basic usage information; you can turn this off by setting the configuration parameter **telemetryReporting** in the system configuration file taos.cfg to 0.
- TDengine uses the FQDN (usually the hostname) as the node ID. To ensure normal operation, you need to set the hostname of the server running taosd, and configure a DNS service or the hosts file on the machine running the client application, so that the FQDN can be resolved.
- TDengine supports installation on Linux systems with [systemd](https://en.wikipedia.org/wiki/Systemd) as the process service manager; use the `which systemctl` command to check whether `systemd` is available on the system:
```bash
$ which systemctl
```
If `systemd` is not supported in the system, TDengine service can also be launched via `/usr/local/taos/bin/taosd` manually.
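For example, to launch it manually in the foreground:
```bash
$ /usr/local/taos/bin/taosd
```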
## <a class="anchor" id="console"></a>TDengine Shell Command Line
To launch TDengine shell, the command line interface, in a Linux terminal, type:
```bash
$ taos
```
The welcome message is printed if the shell connects to TDengine server successfully, otherwise, an error message will be printed (refer to our [FAQ](https://www.taosdata.com/en/faq) page for troubleshooting the connection error). The TDengine shell prompt is:
```cmd
taos>
```
In the TDengine shell, you can create databases, create tables and insert/query data with SQL. Each query command ends with a semicolon. It works like MySQL, for example:
```mysql
create database demo;
use demo;
create table t (ts timestamp, speed int);
insert into t values ('2019-07-15 00:00:00', 10);
insert into t values ('2019-07-15 01:00:00', 20);
select * from t;
ts | speed |
===================================
19-07-15 00:00:00.000| 10|
19-07-15 01:00:00.000| 20|
Query OK, 2 row(s) in set (0.001700s)
```
Besides the SQL commands, the system administrator can check system status, add or delete accounts, and manage the servers.
### Shell Command Line Parameters
You can configure command parameters to change how TDengine shell executes. Some frequently used options are listed below:
- -c, --config-dir: set the configuration directory. It is */etc/taos* by default.
- -h, --host: set the IP address of the server it will connect to. Default is localhost.
- -s, --commands: set the command to run without entering the shell.
- -u, --user: user name to connect to the server. Default is root.
- -p, --password: password. Default is 'taosdata'.
- -?, --help: get a full list of supported options.
Examples:
```bash
$ taos -h 192.168.0.1 -s "use db; show tables;"
```
### Run SQL Command Scripts
Inside TDengine shell, you can run SQL scripts in a file with the `source` command.
```mysql
taos> source <filename>;
```
### Shell Tips
- Use up/down arrow key to check the command history
- To change the default password, use the "alter user" command (see the sketch after this list)
- Use ctrl+c to interrupt any queries
- To clean the schema of local cached tables, execute command `RESET QUERY CACHE`
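As a hedged sketch of the two commands mentioned above (the password value is illustrative):
```mysql
-- Change the password of user root (illustrative value):
ALTER USER root PASS 'newpass123';
-- Clear the locally cached table schemas:
RESET QUERY CACHE;
```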
## <a class="anchor" id="demo"></a>Experience TDengine’s Lightning Speed
After starting the TDengine server, you can execute the command `taosdemo` in the Linux terminal.
```bash
$ taosdemo
```
Using this command, a STable named `meters` will be created in the database `test`. There are 10,000 tables under this STable, named from `t0` to `t9999`. Each table has 100,000 rows of records, and each row has three columns (`f1`, `f2` and `f3`), with timestamps ranging from "2017-07-14 10:40:00.000" to "2017-07-14 10:41:39.999". Each table also has tags `areaid` and `loc`: `areaid` is set from 1 to 10, and `loc` is set to "beijing" or "shanghai".
It takes about 10 minutes to execute this command. Once finished, 1 billion rows of records will be inserted.
In the TDengine client, enter the following SQL queries to experience the lightning query speed.
- query total rows of records:
```mysql
taos> select count(*) from test.meters;
```
- query average, max and min of the total 1 billion records:
```mysql
taos> select avg(f1), max(f2), min(f3) from test.meters;
```
- query the number of records where loc="beijing":
```mysql
taos> select count(*) from test.meters where loc="beijing";
```
- query the average, max and min of total records where areaid=10:
```mysql
taos> select avg(f1), max(f2), min(f3) from test.meters where areaid=10;
```
- query the average, max, min from table t10 when aggregating over every 10s:
```mysql
taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s);
```
**Note**: you can run the command `taosdemo` with many options, such as the number of tables and rows of records. To learn more about these options, execute `taosdemo --help` and try them out.
## Client and Alarm Module
If your client and server are running on different machines, please install the client separately. Linux and Windows packages are provided:
- TDengine-client-2.0.10.0-Linux-x64.tar.gz(3.0M)
- TDengine-client-2.0.10.0-Windows-x64.exe(2.8M)
- TDengine-client-2.0.10.0-Windows-x86.exe(2.8M)
The Linux package of the Alarm Module is as follows (please refer to [How to Use Alarm Module](https://github.com/taosdata/TDengine/blob/master/alert/README_cn.md)):
- TDengine-alert-2.0.10.0-Linux-x64.tar.gz (8.1M)
## <a class="anchor" id="platforms"></a>List of Supported Platforms
List of platforms supported by TDengine server
| | **CentOS 6/7/8** | **Ubuntu 16/18/20** | **Other Linux** | UnionTech UOS | NeoKylin | LINX V60/V80 |
| ------------------ | ---------------- | ------------------- | --------------- | ------------- | -------- | ------------ |
| X64 | ● | ● | | ○ | ● | ● |
| Raspberry ARM32 | | ● | ● | | | |
| Loongson MIPS64 | | | ● | | | |
| Kunpeng ARM64 | | ○ | ○ | | ● | |
| SWCPU Alpha64 | | | ○ | ● | | |
| FT ARM64 | | ○ Ubuntu Kylin | | | | |
| Hygon X64 | ● | ● | ● | ○ | ● | ● |
| Rockchip ARM64/32 | | | ○ | | | |
| Allwinner ARM64/32 | | | ○ | | | |
| Actions ARM64/32 | | | ○ | | | |
| TI ARM32 | | | ○ | | | |
Note: ● has been verified by official tests; ○ has been verified by unofficial tests.
List of platforms supported by TDengine client and connectors
At the moment, TDengine connectors can support a wide range of platforms, including hardware platforms such as X64/X86/ARM64/ARM32/MIPS/Alpha, and development environments such as Linux/Win64/Win32.
The comparison matrix is as follows:
| **CPU** | **X64 64bit** | **X64 64bit** | **X64 64bit** | **X86 32bit** | **ARM64** | **ARM32** | **MIPS Godson** | **Alpha Shenwei** | **X64 TimecomTech** |
| ----------- | ------------- | --------- | --------- | ------------- | --------- | --------- | --------------- | ----------------- | ------------------- |
| **OS** | **Linux** | **Win64** | **Win32** | **Win32** | **Linux** | **Linux** | **Linux** | **Linux** | **Linux** |
| **C/C++** | ● | ● | ● | ○ | ● | ● | ● | ● | ● |
| **JDBC** | ● | ● | ● | ○ | ● | ● | ● | ● | ● |
| **Python** | ● | ● | ● | ○ | ● | ● | ● | -- | ● |
| **Go** | ● | ● | ● | ○ | ● | ● | ○ | -- | -- |
| **NodeJs** | ● | ● | ○ | ○ | ● | ● | ○ | -- | -- |
| **C#** | ○ | ● | ● | ○ | ○ | ○ | ○ | -- | -- |
| **RESTful** | ● | ● | ● | ● | ● | ● | ● | ● | ● |
Note: ● has been verified by official tests; ○ has been verified by unofficial tests.
Please visit [Connectors](https://www.taosdata.com/en/documentation/connector) section for more detailed information.
# Data Modeling
TDengine adopts a relational data model, so we need to create "databases" and "tables". For a specific application scenario, it is necessary to consider the design of databases, STables, and ordinary tables. This section does not discuss detailed syntax rules, but only concepts.
Please watch the [video tutorial](https://www.taosdata.com/blog/2020/11/11/1945.html) for data modeling.
## <a class="anchor" id="create-db"></a> Create a Database
Different types of data collection points often have different data characteristics, including frequency of data collection, length of data retention, number of replicas, size of data blocks, whether to update data or not, and so on. To ensure TDengine works with great efficiency in various scenarios, TDengine suggests creating tables with different data characteristics in different databases, because each database can be configured with a different storage strategy. When creating a database, in addition to SQL standard options, the application can also specify a variety of parameters such as retention duration, number of replicas, number of memory blocks, time precision, max and min number of records in a file block, whether it is compressed or not, and the number of days covered by a data file. For example:
```mysql
CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 4 UPDATE 1;
```
The above statement will create a database named “power”. The data of this database will be kept for 365 days (it will be automatically deleted 365 days later), one data file will be created every 10 days, the number of memory blocks is 4, and updating data is allowed. For detailed syntax and parameters, please refer to the [Data Management section of TAOS SQL](https://www.taosdata.com/en/documentation/taos-sql#management).
After the database is created, please use the SQL command USE to switch to the new database, for example:
```mysql
USE power;
```
This replaces the database operated on by the current connection with “power”. Otherwise, before operating on a specific table, you would need to prefix the table with its database name in the form "database_name.table_name", as shown in the sketch after the notes below.
**Note:**
- Any table or STable belongs to a database. Before creating a table, a database must be created first.
- Tables in two different databases cannot be JOINed.
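To illustrate the database-prefix form mentioned above, the following hedged sketch queries a table without switching the current database first (table d1001 is the smart meter table created later in this chapter):
```mysql
-- Equivalent to running "USE power;" followed by "SELECT * FROM d1001;"
SELECT * FROM power.d1001;
```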
## <a class="anchor" id="create-stable"></a> Create a STable
An IoT system often has many types of devices, such as smart meters, transformers, buses, switches, etc. for power grids. In order to facilitate aggregation among multiple tables, using TDengine, it is necessary to create a STable for each type of data collection point. Taking the smart meter in Table 1 as an example, you can use the following SQL command to create a STable:
```mysql
CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);
```
**Note:** The STABLE keyword in this instruction needs to be written as TABLE in versions before 2.0.15.
Just like creating an ordinary table, you need to provide the table name (‘meters’ in the example) and the table structure schema, that is, the definition of data columns. The first column must be a timestamp (‘ts’ in the example), the other columns are the physical metrics collected (current, voltage, phase in the example), and the data types can be int, float, string, etc. In addition, you need to provide the schema of the tags (location, groupId in the example), and the data types of tags can be int, float, string and so on. Static attributes of collection points can often be used as tags, such as the geographic location of the collection point, device model, device group ID, administrator ID, etc. The schema of the tags can be added, deleted and modified afterwards. Please refer to the [STable Management section of TAOS SQL](https://www.taosdata.com/cn/documentation/taos-sql#super-table) for specific definitions and details.
Each type of data collection point needs an established STable, so an IoT system often has multiple STables. For the power grid, we need to build a STable each for smart meters, transformers, buses, switches, etc. For IoT, a device may have multiple data collection points (for example, on a wind turbine, some collection points capture parameters such as current and voltage, and some capture environmental parameters such as temperature, humidity and wind direction). In this case, multiple STables need to be established for the corresponding types of devices. All physical metrics contained in one and the same STable must be collected at the same time (with a consistent timestamp).
A STable allows up to 1024 columns. If the number of physical metrics collected at a collection point exceeds 1024, multiple STables need to be built to process them. A system can have multiple DBs, and a DB can have one or more STables.
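As a hedged illustration of the multiple-STable design described above, a wind turbine with two independently sampled collection points might be modeled with two STables (all names and columns below are illustrative, not part of the smart meter example):
```mysql
-- One STable per collection point type; each sub-table represents one turbine.
CREATE STABLE turbine_electrical (ts timestamp, current float, voltage int, phase float) TAGS (turbine_id binary(32));
CREATE STABLE turbine_environment (ts timestamp, temperature float, humidity float, wind_direction int) TAGS (turbine_id binary(32));
```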
## <a class="anchor" id="create-table"></a> Create a Table
TDengine builds a table independently for each data collection point. Similar to standard relational data, one table has a table name, Schema, but in addition, it can also carry one or more tags. When creating, you need to use the STable as a template and specify the specific value of the tag. Taking the smart meter in Table 1 as an example, the following SQL command can be used to build the table:
```mysql
CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);
```
Here d1001 is the table name and meters is the name of the STable, followed by the tag value "Beijing.Chaoyang" for tag location and the tag value 2 for tag groupId. Although the tag values need to be specified when creating the table, they can be modified afterwards. Please refer to the [Table Management section of TAOS SQL](https://www.taosdata.com/en/documentation/taos-sql#table) for details.
**Note:** At present, TDengine does not technically restrict using a STable of one database (dbA) as a template to create a sub-table in another database (dbB). This usage will be prohibited later, and it is not recommended to create tables this way.
TDengine suggests using the globally unique ID of a data collection point as the table name (such as the device serial number). However, in some scenarios there is no unique ID, in which case multiple IDs can be combined into one unique ID. It is not recommended to use a unique ID as a tag value.
**Automatic table creation**: In some special scenarios, the user is not sure whether the table for a certain data collection point exists when writing data. In this case, a non-existent table can be created with the automatic table creation syntax while writing data. If the table already exists, no new table will be created. For example:
```mysql
INSERT INTO d1001 USING METERS TAGS ("Beijing.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32);
```
The SQL statement above inserts the record (now, 10.2, 219, 0.32) into table d1001. If table d1001 has not been created yet, the STable meters is used as a template to create it automatically, and the tag values "Beijing.Chaoyang" and 2 are set at the same time.
For detailed syntax of automatic table building, please refer to the "[Automatic Table Creation When Inserting Records](https://www.taosdata.com/en/documentation/taos-sql#auto_create_table)" section.
## Multi-column Model vs Single-column Model
TDengine supports a multi-column model. As long as physical metrics are collected simultaneously by a data collection point (with a consistent timestamp), these metrics can be placed in one STable as different columns. However, there is also an extreme design, the single-column model, in which a separate STable is created for each collected metric; for example, three STables would be created for current, voltage and phase respectively.
TDengine recommends using the multi-column model as much as possible because of its higher insertion and storage efficiency. However, in some scenarios the types of collected metrics change frequently. In this case, the multi-column model would require frequent modifications to the STable's structure definition, making the application complicated; the single-column model is then recommended.
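For concreteness, a hedged sketch of the single-column model for the smart meter example might look like this (one STable per metric; the STable names are illustrative):
```mysql
CREATE STABLE meters_current (ts timestamp, current float) TAGS (location binary(64), groupId int);
CREATE STABLE meters_voltage (ts timestamp, voltage int) TAGS (location binary(64), groupId int);
CREATE STABLE meters_phase (ts timestamp, phase float) TAGS (location binary(64), groupId int);
```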
# Efficient Data Writing
TDengine supports multiple interfaces to write data, including SQL, Prometheus, Telegraf, EMQ MQTT Broker, HiveMQ Broker, CSV file, etc. Kafka, OPC and other interfaces will be provided in the future. Data can be inserted one record at a time or in batches, from one or multiple data collection points at the same time. TDengine supports multi-thread insertion, out-of-order data insertion, and historical data insertion.
## <a class="anchor" id="sql"></a> SQL Writing
Applications insert data by executing SQL insert statements through the C/C++, JDBC, GO, or Python Connector, and users can manually enter SQL insert statements through the TAOS Shell. For example, the following insert writes a record to table d1001:
```mysql
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31);
```
TDengine supports writing multiple records at a time. For example, the following command writes two records to table d1001:
```mysql
INSERT INTO d1001 VALUES (1538548684000, 10.2, 220, 0.23) (1538548696650, 10.3, 218, 0.25);
```
TDengine also supports writing data to multiple tables at a time. For example, the following command writes two records to d1001 and one record to d1002:
```mysql
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) d1002 VALUES (1538548696800, 12.3, 221, 0.31);
```
For the SQL INSERT Grammar, please refer to [Taos SQL insert](https://www.taosdata.com/en/documentation/taos-sql#insert)
**Tips:**
- To improve writing efficiency, batch writing is required. The more records written in a batch, the higher the insertion efficiency. However, a record cannot exceed 16K, and the total length of an SQL statement cannot exceed 64K (it can be configured by parameter maxSQLLength, and the maximum can be configured to 1M).
- TDengine supports multi-thread parallel writing. To further improve writing speed, a client should open more than 20 threads for parallel writing. However, after the number of threads reaches a certain threshold, the writing speed no longer increases and may even decrease, because overly frequent thread switching brings extra overhead.
- For the same table, if the timestamp of a newly inserted record already exists and the database was not created with UPDATE 1, the new record will be discarded by default; that is, the timestamp must be unique within a table. If an application automatically generates records, it is very likely that the generated timestamps will collide, so the number of records successfully inserted will be smaller than the number the application tried to insert. If the database was created with the UPDATE 1 option, inserting a new record with an existing timestamp will overwrite the original record (see the sketch after this list).
- The timestamp of written data must be greater than the current time minus the configuration parameter keep. If keep is configured as 3650 days, data older than 3650 days cannot be written. The timestamp of written data cannot be greater than the current time plus the configuration parameter days. If days is configured as 2, data with timestamps more than 2 days in the future cannot be written.
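The following hedged sketch illustrates the duplicate-timestamp behavior described above (all values are illustrative):
```mysql
-- In a database created WITHOUT "UPDATE 1", the second insert is silently discarded;
-- in a database created WITH "UPDATE 1", it overwrites the first record.
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31);
INSERT INTO d1001 VALUES (1538548685000, 11.0, 220, 0.33);
```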
## <a class="anchor" id="prometheus"></a> Direct Writing of Prometheus
As a graduated project of the Cloud Native Computing Foundation, [Prometheus](https://www.prometheus.io/) is widely used for performance monitoring, including K8S performance monitoring. TDengine provides a simple tool, [Bailongma](https://github.com/taosdata/Bailongma), which only needs simple configuration in Prometheus, without any code, to write the data collected by Prometheus directly into TDengine, automatically creating databases and related tables in TDengine according to rules. The blog post [Use Docker Container to Quickly Build a Devops Monitoring Demo](https://www.taosdata.com/blog/2020/02/03/1189.html) is an example of using Bailongma to write Prometheus and Telegraf data into TDengine.
### Compile blm_prometheus From Source
Users need to download the source code of [Bailongma](https://github.com/taosdata/Bailongma) from GitHub, then compile it into an executable with the Golang compiler. Before you start compiling, complete the following preparations:
- A server running Linux OS
- Golang version 1.10 or higher installed
- An appropriate TDengine version. Because the TDengine client dynamic library is used, install the same version as the server side; for example, if the server version is TDengine 2.0.0, install the same version on the Linux server where Bailongma is located (it can be the same server as TDengine, or a different one)
Bailongma project has a folder, blm_prometheus, which holds the prometheus writing API. The compiling process is as follows:
```bash
cd blm_prometheus
go build
```
If everything goes well, an executable of blm_prometheus will be generated in the corresponding directory.
### Install Prometheus
Download and install as the instruction of Prometheus official website. [Download Address](https://prometheus.io/download/)
### Configure Prometheus
Read the Prometheus [configuration document](https://prometheus.io/docs/prometheus/latest/configuration/configuration/) and add the following configuration to the `remote_write` section of the Prometheus configuration file:
- url: The URL provided by bailongma API service, refer to the blm_prometheus startup example section below
After Prometheus is launched, you can check whether data is written successfully by querying with the taos client.
### Launch blm_prometheus
blm_prometheus has following options that you can configure when you launch blm_prometheus.
```sh
--tdengine-name
If TDengine is installed on a server with a domain name, you can also access TDengine by configuring that domain name. In a K8S environment, it can be configured as the service name under which TDengine runs.
--batch-size
blm_prometheus assembles the received prometheus data into a TDengine writing request. This parameter controls the number of data records carried in a single writing request sent to TDengine.
--dbname
Set a name for the database created in TDengine; blm_prometheus will automatically create a database named dbname in TDengine, and the default value is prometheus.
--dbuser
Set the user name to access TDengine, the default value is 'root'
--dbpassword
Set the password to access TDengine, the default value is 'taosdata'
--port
The port number blm_prometheus uses to serve prometheus.
```
### Example
Launch an API service for blm_prometheus with the following command:
```bash
./blm_prometheus -port 8088
```
Assuming that the IP address of the server where blm_prometheus is located is 10.1.2.3, the URL shall be added to the configuration file of Prometheus as:
```yaml
remote_write:
  - url: "http://10.1.2.3:8088/receive"
```
### Query written data of prometheus
The format of generated data by Prometheus is as follows:
```json
{
Timestamp: 1576466279341,
Value: 37.000000,
apiserver_request_latencies_bucket {
component="apiserver",
instance="192.168.99.116:8443",
job="kubernetes-apiservers",
le="125000",
resource="persistentvolumes", s
cope="cluster",
verb="LIST",
version=“v1"
}
}
```
Where apiserver_request_latencies_bucket is the name of the time-series data collected by Prometheus, and the tags of the time-series data are inside the following {}. blm_prometheus automatically creates a STable in TDengine named after the time-series data, converts the tags in {} into TDengine tag values, and uses Timestamp as the timestamp and Value as the value of the time-series data. Therefore, in the TDengine client, you can check whether this data was successfully written with the following commands.
```mysql
use prometheus;
select * from apiserver_request_latencies_bucket;
```
## <a class="anchor" id="telegraf"></a> Direct Writing of Telegraf
[Telegraf](https://www.influxdata.com/time-series-platform/telegraf/) is a popular open source tool for collecting IT operation data. TDengine provides the simple tool [Bailongma](https://github.com/taosdata/Bailongma), which only needs simple configuration in Telegraf, without any code, to write the data collected by Telegraf directly into TDengine, automatically creating databases and related tables in TDengine according to rules. The blog post [Use Docker Container to Quickly Build a Devops Monitoring Demo](https://www.taosdata.com/blog/2020/02/03/1189.html) is an example of using Bailongma to write Prometheus and Telegraf data into TDengine.
### Compile blm_telegraf From Source Code
Users need to download the source code of [Bailongma](https://github.com/taosdata/Bailongma) from GitHub, then compile it into an executable with the Golang compiler. Before you start compiling, complete the following preparations:
- A server running Linux OS
- Golang version 1.10 or higher installed
- An appropriate TDengine version. Because the TDengine client dynamic library is used, install the same version as the server side; for example, if the server version is TDengine 2.0.0, install the same version on the Linux server where Bailongma is located (it can be the same server as TDengine, or a different one)
Bailongma project has a folder, blm_telegraf, which holds the Telegraf writing API. The compiling process is as follows:
```bash
cd blm_telegraf
go build
```
If everything goes well, an executable of blm_telegraf will be generated in the corresponding directory.
### Install Telegraf
At the moment, TDengine supports Telegraf version 1.7.4 and above. Users can download the installation package from Telegraf's website according to their current operating system. The download address is as follows: https://portal.influxdata.com/downloads
### Configure Telegraf
Modify the TDengine-related configurations in the Telegraf configuration file /etc/telegraf/telegraf.conf.
In the output plugins section, add the [[outputs.http]] configuration:
- url: The URL provided by bailongma API service, please refer to the example section below
- data_format: "json"
- json_timestamp_units: "1ms"
In agent section:
- hostname: the machine name, which distinguishes different collection devices and must be unique
- metric_batch_size: 100, the maximum number of records per batch that Telegraf is allowed to write. Increasing this number can reduce the request frequency of Telegraf (a combined configuration sketch follows this list).
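Putting the settings above together, the relevant fragment of /etc/telegraf/telegraf.conf might look like the following hedged sketch (the URL and hostname are illustrative; see the blm_telegraf example below for the URL format):
```yaml
[[outputs.http]]
  url = "http://10.1.2.3:8089/telegraf"
  data_format = "json"
  json_timestamp_units = "1ms"

[agent]
  hostname = "device-001"
  metric_batch_size = 100
```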
For information on how to use Telegraf to collect data and more about using Telegraf, please refer to the official [document](https://docs.influxdata.com/telegraf/v1.11/) of Telegraf.
### Launch blm_telegraf
blm_telegraf has the following options, which can be set when launching it to tune its configuration.
```sh
--host
The IP address of the TDengine server; the default is null.
--batch-size
blm_telegraf assembles the received telegraf data into a TDengine writing request. This parameter controls the number of data records carried in a single writing request sent to TDengine.
--dbname
Set a name for the database created in TDengine; blm_telegraf will automatically create a database named dbname in TDengine, and the default value is prometheus.
--dbuser
Set the user name to access TDengine, the default value is 'root'
--dbpassword
Set the password to access TDengine, the default value is 'taosdata'
--port
The port number blm_telegraf uses to serve Telegraf.
```
### Example
Launch an API service for blm_telegraf with the following command
```bash
./blm_telegraf -host 127.0.0.1 -port 8089
```
Assuming that the IP address of the server where blm_telegraf is located is 10.1.2.3, the URL shall be added to the configuration file of telegraf as:
```yaml
url = "http://10.1.2.3:8089/telegraf"
```
### Query written data of telegraf
The format of generated data by telegraf is as follows:
```json
{
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 89.7897897897898,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 5.405405405405405,
"usage_user": 4.804804804804805
},
"name": "cpu",
"tags": {
"cpu": "cpu2",
"host": "bogon"
},
"timestamp": 1576464360
}
```
Where the name field is the name of the time-series data collected by telegraf, and the tag field contains its tags. blm_telegraf automatically creates a STable in TDengine named after the time-series data, converts the tag field into TDengine tag values, and uses timestamp as the timestamp and the fields values as the values of the time-series data. Therefore, in the TDengine client, you can check whether this data was successfully written with the following commands.
```mysql
use telegraf;
select * from cpu;
```
## <a class="anchor" id="emq"></a> Direct Writing of EMQ Broker
MQTT is a popular data transmission protocol in the IoT, and TDengine can easily receive the data collected by an MQTT Broker and write it to TDengine. [EMQ](https://github.com/emqx/emqx) is an open source MQTT Broker software. With no coding required, you can simply configure "rules" in the EMQ Dashboard to write MQTT data directly into TDengine. EMQ X supports storing data to TDengine by sending it to a web service, and the Enterprise Edition also provides a native TDengine driver for direct data storage. Please refer to the [EMQ official documents](https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine) for more details.
## <a class="anchor" id="hivemq"></a> Direct Writing of HiveMQ Broker
[HiveMQ](https://www.hivemq.com/) is an MQTT broker that provides Free Personal and Enterprise Edition versions. It is mainly used in enterprise, emerging machine-to-machine (M2M) communication, and internal transmission scenarios that require scalability, manageability and security. HiveMQ provides an open source plug-in development kit, and you can store data to TDengine via the HiveMQ extension-TDengine. Refer to the [HiveMQ extension-TDengine documentation](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README.md) for more details.
# Efficient Data Querying
## <a class="anchor" id="queries"></a> Main Query Features
TDengine uses SQL as its query language. Applications can send SQL statements through the C/C++, Java, Go, or Python connectors, and users can manually execute ad-hoc SQL queries through the Command Line Interface (CLI) tool TAOS Shell provided by TDengine. TDengine supports the following query functions:
- Single-column and multi-column data query
- Multiple filters for tags and numeric values: >, <, =, <>, like, etc.
- Group by, Order by, Limit/Offset on aggregation results
- Arithmetic operations on numeric columns and aggregation results
- Time stamp aligned join query (implicit join) operations
- Multiple aggregation/calculation functions: count, max, min, avg, sum, twa, stddev, leastsquares, top, bottom, first, last, percentile, apercentile, last_row, spread, diff, etc
For example, in TAOS shell, the records with voltage > 215 are queried from table d1001, sorted in descending order by timestamp, and only two records are output.
```mysql
taos> select * from d1001 where voltage > 215 order by ts desc limit 2;
ts | current | voltage | phase |
======================================================================================
2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 |
2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 |
Query OK, 2 row(s) in set (0.001100s)
```
In order to meet the needs of an IoT scenario, TDengine supports several special functions, such as twa (time weighted average), spread (difference between maximum and minimum), last_row (last record), etc. More functions related to IoT scenarios will be added. TDengine also supports continuous queries.
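As a hedged sketch of these special functions on the meter data used above (the time range is illustrative; TWA requires an explicit time range in the WHERE clause):
```mysql
taos> SELECT TWA(current), SPREAD(voltage) FROM d1001 WHERE ts >= '2018-10-03 14:38:05' AND ts <= '2018-10-03 14:38:16';
taos> SELECT LAST_ROW(*) FROM d1001;
```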
For specific query syntax, please see the [Data Query section of TAOS SQL](https://www.taosdata.com/cn/documentation/taos-sql#select).
## <a class="anchor" id="aggregation"></a> Multi-table Aggregation Query
In an IoT scenario, there are often multiple data collection points of the same type. TDengine uses the concept of a STable to describe a certain type of data collection point, and an ordinary table to describe a specific data collection point. At the same time, TDengine uses tags to describe the static attributes of data collection points. A given data collection point has specific tag values. By specifying tag filters, TDengine provides an efficient method to aggregate and query the sub-tables of a STable (the data collection points of a certain type). Aggregation functions and most operations on ordinary tables are applicable to STables, and the syntax is exactly the same.
**Example 1**: In TAOS Shell, look up the average voltages collected by all smart meters in Beijing and group them by location
```mysql
taos> SELECT AVG(voltage) FROM meters GROUP BY location;
avg(voltage) | location |
=============================================================
222.000000000 | Beijing.Haidian |
219.200000000 | Beijing.Chaoyang |
Query OK, 2 row(s) in set (0.002136s)
```
**Example 2**: In TAOS Shell, look up the number of records and the maximum current of all smart meters with groupId 2 in the past 24 hours.
```mysql
taos> SELECT count(*), max(current) FROM meters where groupId = 2 and ts > now - 24h;
       count(*)  |    max(current)  |
==================================
5 | 13.4 |
Query OK, 1 row(s) in set (0.002136s)
```
TDengine only allows aggregation queries between tables belonging to the same STable; aggregation queries between different STables are not supported. In the Data Query section of TAOS SQL, each query operation indicates whether STables are supported.
## <a class="anchor" id="sampling"></a> Down Sampling Query, Interpolation
In IoT scenarios, it is often necessary to aggregate the collected data over time intervals through down sampling. TDengine provides the simple keyword INTERVAL, which makes queries by time window extremely simple. For example, the following sums the current values collected by smart meter d1001 every 10 seconds.
```mysql
taos> SELECT sum(current) FROM d1001 INTERVAL(10s);
ts | sum(current) |
======================================================
2018-10-03 14:38:00.000 | 10.300000191 |
2018-10-03 14:38:10.000 | 24.900000572 |
Query OK, 2 row(s) in set (0.000883s)
```
The down sampling operation is also applicable to STables, such as summing the current values collected by all smart meters in Beijing every second.
```mysql
taos> SELECT SUM(current) FROM meters where location like "Beijing%" INTERVAL(1s);
ts | sum(current) |
======================================================
2018-10-03 14:38:04.000 | 10.199999809 |
2018-10-03 14:38:05.000 | 32.900000572 |
2018-10-03 14:38:06.000 | 11.500000000 |
2018-10-03 14:38:15.000 | 12.600000381 |
2018-10-03 14:38:16.000 | 36.000000000 |
Query OK, 5 row(s) in set (0.001538s)
```
The down sampling operation also supports a time offset, such as summing the current values collected by all smart meters every second, with each time window starting at an offset of 500 milliseconds.
```mysql
taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a);
ts | sum(current) |
======================================================
2018-10-03 14:38:04.500 | 11.189999809 |
2018-10-03 14:38:05.500 | 31.900000572 |
2018-10-03 14:38:06.500 | 11.600000000 |
2018-10-03 14:38:15.500 | 12.300000381 |
2018-10-03 14:38:16.500 | 35.000000000 |
Query OK, 5 row(s) in set (0.001521s)
```
In IoT scenarios, it is difficult to synchronize the collection timestamps across data collection points, but many analysis algorithms (such as FFT) need to align the collected data at strictly equal time intervals. Many systems require users to write their own programs for this, but TDengine's down sampling operation easily solves the problem. If there is no collected data in an interval, TDengine also provides an interpolation calculation function.
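As a hedged sketch, interpolation can be requested with a FILL clause appended to an interval query; the time range and fill mode below are illustrative:
```mysql
taos> SELECT AVG(current) FROM d1001 WHERE ts >= '2018-10-03 14:38:00' AND ts <= '2018-10-03 14:38:30' INTERVAL(5s) FILL(LINEAR);
```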
For details of syntax rules, please refer to the [Time-dimension Aggregation section of TAOS SQL](https://www.taosdata.com/en/documentation/taos-sql#aggregation).
# Connections with Other Tools
## <a class="anchor" id="grafana"></a> Grafana
TDengine can quickly integrate with [Grafana](https://www.grafana.com/), an open source data visualization system, to build a data monitoring and alarming system. The whole process does not require writing any code, and the contents of data tables in TDengine can be visually shown on a dashboard.
### Install Grafana
TDengine currently supports Grafana 5.2.4 and above. You can download and install the package from the Grafana website according to your current operating system. The download address is as follows:
https://grafana.com/grafana/download
### Configure Grafana
TDengine Grafana plugin is in the /usr/local/taos/connector/grafanaplugin directory.
Taking CentOS 7.2 as an example, copy the grafanaplugin directory to the /var/lib/grafana/plugins directory and restart Grafana.
```bash
sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine
```
### Use Grafana
#### Configure data source
You can log in to the Grafana server (username/password: admin/admin) at localhost:3000, and add data sources through `Configuration -> Data Sources` on the left panel, as shown in the following figure:
![img](page://images/connections/add_datasource1.jpg)
Click `Add data source` to enter the Add Data Source page, and type TDengine in the search box to select it, as shown in the following figure:
![img](page://images/connections/add_datasource2.jpg)
Enter the data source configuration page and modify the corresponding configuration according to the default prompt:
![img](page://images/connections/add_datasource3.jpg)
- Host: IP address of any server in TDengine cluster and port number of TDengine RESTful interface (6041), default [http://localhost:6041](http://localhost:6041/)
- User: TDengine username.
- Password: TDengine user password.
Click `Save & Test` to test. Success will be prompted as follows:
![img](page://images/connections/add_datasource4.jpg)
#### Create Dashboard
Go back to the home to create Dashboard, and click `Add Query` to enter the panel query page:
![img](page://images/connections/create_dashboard1.jpg)
As shown in the figure above, select the TDengine data source in Query, and enter the corresponding sql in the query box below to query. Details are as follows:
- INPUT SQL: Enter the statement to query (the result set of the SQL statement should be two columns and multiple rows), for example: `select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)`, where `from`, `to` and `interval` are built-in variables of the TDengine plug-in, representing the query range and time interval obtained from the Grafana panel. In addition to the built-in variables, custom template variables are also supported.
- ALIAS BY: You can set alias for the current queries.
- GENERATE SQL: Clicking this button will automatically replace the corresponding variable and generate the final statement to execute.
Following the default prompt, query the average system memory usage at the specified interval on the server where the current TDengine is deployed, as follows:
![img](page://images/connections/create_dashboard2.jpg)
> Please refer to Grafana [documents](https://grafana.com/docs/) for how to use Grafana to create the corresponding monitoring interface and for more about Grafana usage.
#### Import Dashboard
A `tdengine-grafana.json` importable dashboard is provided under the Grafana plug-in directory /usr/local/taos/connector/grafana/tdengine/dashboard/.
Click the `Import` button on the left panel and upload the `tdengine-grafana.json` file:
![img](page://images/connections/import_dashboard1.jpg)
You can see the following after the dashboard is imported.
![img](page://images/connections/import_dashboard2.jpg)
## <a class="anchor" id="matlab"></a> MATLAB
MATLAB can pull data into its local workspace by connecting directly to TDengine via the JDBC driver provided in the installation package.
### JDBC Interface Adaptation of MATLAB
Several steps are required to adapt MATLAB to TDengine. Taking MATLAB 2017a on Windows 10 as an example:
- Copy the file JDBCDriver-1.0.0-dist.jar in the TDengine package to the directory ${matlab_root}\MATLAB\R2017a\java\jar\toolbox
- Copy the file taos.lib in TDengine package to ${matlab root dir}\MATLAB\R2017a\lib\win64
- Add the .jar package just copied to the MATLAB classpath. Append the line below to the end of the file ${matlab root dir}\MATLAB\R2017a\toolbox\local\classpath.txt
- ```
$matlabroot/java/jar/toolbox/JDBCDriver-1.0.0-dist.jar
```
- Create a file called javalibrarypath.txt in the directory ${user_home}\AppData\Roaming\MathWorks\MATLAB\R2017a, and add the taos.dll path in the file. For example, if the file taos.dll is in the directory C:\Windows\System32, then add the following line in the file javalibrarypath.txt:
- ```
C:\Windows\System32
```
### Connect to TDengine in MATLAB to get data
After the above is configured successfully, open MATLAB.
- Create a connection:
```matlab
conn = database('db', 'root', 'taosdata', 'com.taosdata.jdbc.TSDBDriver', 'jdbc:TSDB://127.0.0.1:0/')
```
* Make a query:
```matlab
sql0 = ['select * from tb'];
data = select(conn, sql0);
```
* Insert a record:
```matlab
sql1 = ['insert into tb values (now, 1)'];
exec(conn, sql1)
```
For more detailed examples, please refer to the examples\Matlab\TDEngineDemo.m file in the package.
## <a class="anchor" id="r"></a> R
R language supports connection to the TDengine database through the JDBC interface. First, you need to install the JDBC package of R language. Launch the R language environment, and then execute the following command to install the JDBC support library for R language:
```R
install.packages('RJDBC', repos='http://cran.us.r-project.org')
```
After installed, load the RJDBC package by executing `library('RJDBC')` command.
Then load the TDengine JDBC driver:
```R
drv<-JDBC("com.taosdata.jdbc.TSDBDriver","JDBCDriver-2.0.0-dist.jar", identifier.quote="\"")
```
If it succeeds, no error message is displayed. Then use the following command to try a database connection:
```R
conn<-dbConnect(drv,"jdbc:TSDB://192.168.0.1:0/?user=root&password=taosdata","root","taosdata")
```
Please replace the IP address in the command above with the correct one. If no error message is shown, the connection is established successfully; otherwise, adjust the connection command according to the error prompt. TDengine supports the following functions in the *RJDBC* package:
- `dbWriteTable(conn, "test", iris, overwrite=FALSE, append=TRUE)`: write the data of data frame iris to table test on the TDengine server. The parameter overwrite must be FALSE and append must be TRUE, and the data frame iris must have the same schema as table test.
- `dbGetQuery(conn, "select count(*) from test")`: run a query command
- `dbSendUpdate(conn, "use db")`: execute any non-query SQL statement, for example `dbSendUpdate(conn, "use db")` to switch databases, or `dbSendUpdate(conn, "insert into t1 values (now, 99)")` to write data.
- `dbReadTable(conn, "test")`: read all the data in table test
- `dbDisconnect(conn)`: close a connection
- `dbRemoveTable(conn, "test")`: remove table test
The following functions are currently not supported:
- `dbExistsTable(conn, "test")`: Check whether table test exists.
- `dbListTables(conn)`: List all tables in the connection.
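As a minimal end-to-end sketch combining the supported functions, assuming a database `db` with a table `t1` (one timestamp column plus one value column) already exists on the server:
```R
library('RJDBC')
# Load the TDengine JDBC driver; the jar is assumed to sit in the working directory.
drv <- JDBC("com.taosdata.jdbc.TSDBDriver", "JDBCDriver-2.0.0-dist.jar", identifier.quote="\"")
# Replace 192.168.0.1 with your server's address.
conn <- dbConnect(drv, "jdbc:TSDB://192.168.0.1:0/?user=root&password=taosdata", "root", "taosdata")
dbSendUpdate(conn, "use db")                           # switch to an existing database
dbSendUpdate(conn, "insert into t1 values (now, 99)")  # write one record
print(dbGetQuery(conn, "select count(*) from t1"))     # run a query
dbDisconnect(conn)                                     # close the connection
```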
\ No newline at end of file
# TaosData Contributor License Agreement
This TaosData Contributor License Agreement (CLA) applies to any contribution you make to any TaosData project. If you are signing this agreement on behalf of your employing organization, you warrant that you have the authority to do so.
## Terms
**"TaosData"**, **"we"**, **"our"** and **"us"** means TaosData, inc.
**"You"** and **"your"** means you or the organization you are on behalf of to sign this agreement.
**"Contribution"** means any original work you, or the organization you represent submit to TaosData for any project in any manner.
## Copyright License
All rights of your Contribution submitted to TaosData in any manner are granted to TaosData and recipients of software distributed by TaosData. You waive any rights that may affect our ownership of the copyright and grant to us a perpetual, worldwide, transferable, non-exclusive, no-charge, royalty-free, irrevocable, and sublicensable license to use, reproduce, prepare derivative works of, publicly display, publicly perform, sublicense, and distribute Contributions and any derivative work created based on a Contribution.
## Patent License
With respect to any patents you own or that you can license without payment to any third party, you grant to us and to any recipient of software distributed by us, a perpetual, worldwide, transferable, non-exclusive, no-charge, royalty-free, irrevocable patent license to make, have made, use, sell, offer to sell, import, and otherwise transfer the Contribution in whole or in part, alone or included in any product under any patent you own, or license from a third party, that is necessarily infringed by the Contribution or by the combination of the Contribution with any Work.
## Your Representations and Warranties
You represent and warrant that:
- the Contribution you submit is an original work and that you can legally grant the rights set out in this agreement.
- the Contribution you submit and the licenses you grant do not, and will not, infringe the rights of any third party.
- you are not aware of any pending or threatened claims, suits, actions, or charges pertaining to the Contribution. You also warrant that you will notify TaosData immediately if you become aware of any such actual or potential claims, suits, actions, allegations, or charges.
## Support
You are not obligated to support your Contribution unless you volunteer to provide support. If you wish, you may provide support for a fee.
**I agree and accept on behalf of myself and behalf of my organization:**
\ No newline at end of file
# FAQ
#### 1. How do I upgrade TDengine from version 1.X to 2.X and above?
Version 2.X is a complete refactoring of the previous version, and its configuration files and data files are incompatible with 1.X. Be sure to do the following before upgrading:
1. Delete the configuration file: execute `sudo rm -rf /etc/taos/taos`
2. Delete the log files: execute `sudo rm -rf /var/log/taos`
3. ENSURE THAT YOUR DATA IS NO LONGER NEEDED! Delete the data files: execute `sudo rm -rf /var/lib/taos`
4. Enjoy the latest stable version of TDengine.
5. If the data needs to be migrated or the data files are corrupted, please contact the official technical support team for assistance.
#### 2. What can I do when I encounter the error "Unable to establish connection"?
The client may encounter connection errors. Please follow the steps below for troubleshooting:
1. Make sure that the client and server version numbers are exactly the same, and that the open-source community version and the Enterprise version are not mixed.
2. On the server side, execute `systemctl status taosd` to check the status of the *taosd* service. If *taosd* is not running, start it and retry connecting.
3. Make sure you are connecting to the correct server IP address.
4. Ping the server. If no response is received, check your network connection.
5. Check the firewall settings and make sure TCP/UDP ports 6030-6039 are open.
6. For JDBC, ODBC, Python, and Go connections on Linux, make sure the native library *libtaos.so* is located in /usr/local/lib/taos and that /usr/local/lib/taos is in *LD_LIBRARY_PATH*.
7. For JDBC, ODBC, Python, and Go connections on Windows, make sure *driver/c/taos.dll* is in the system search path (or copy taos.dll into *C:\Windows\System32*).
8. If the above steps do not help, try the network diagnostic tool *nc* to check whether the TCP/UDP ports work:
check UDP port: `nc -vuz {hostIP} {port}`
check TCP port on server: `nc -l {port}`
check TCP port on client: `nc {hostIP} {port}`
#### 3. Why do I get an "Invalid SQL" error when a query is syntactically correct?
If you are sure your query has correct syntax, please check the length of the SQL string. Before version 2.0, it must be less than 64 KB.
#### 4. Does TDengine support validation queries?
For the time being, TDengine does not have a specific set of validation queries. However, TDengine comes with a system monitoring database named 'sys', which can usually be used as a validation query object.
#### 5. Can I delete or update a record that has been written into TDengine?
The answer is no. TDengine's design is based on the assumption that records are generated by connected devices, so you are not allowed to change them. However, TDengine provides a retention policy: data records are removed automatically once their lifetime has passed (see the sketch below).
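For illustration, the retention period is configured per database at creation time. A hedged sketch, assuming a hypothetical database name `demo`:
```sql
-- 'demo' is a hypothetical database name; KEEP sets the retention period in days.
-- Records older than 365 days are removed automatically.
CREATE DATABASE demo KEEP 365;
```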
#### 6. What is the most efficient way to write data to TDengine?
TDengine supports several different writing modes. The most efficient way to write data to TDengine is batch inserting, that is, writing multiple records for a table in a single INSERT statement. For details on the batch insertion syntax, please refer to [Taos SQL](../documentation/taos-sql); a minimal example follows.
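A minimal example of batch inserting, reusing the two-column table `tb` from the MATLAB examples above (table name and values are illustrative):
```sql
-- Batch insert: several rows for table 'tb' in a single statement.
INSERT INTO tb VALUES ('2021-07-13 10:00:00.000', 1) ('2021-07-13 10:00:01.000', 2) ('2021-07-13 10:00:02.000', 3);
```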
......@@ -97,7 +97,7 @@ go build -o bin/taosimport app/main.go
Whether to save statistics to the statistic table in TDengine: 1 for yes, 0 for no; default 0.
* -savetb int
* -savetb string
The table name used to save statistics when save is 1; default statistic.
......
module github.com/taosdata/TDengine/importSampleData
go 1.13
require (
github.com/pelletier/go-toml v1.9.0 // indirect
github.com/taosdata/driver-go v0.0.0-20210415143420-d99751356e28 // indirect
)
......@@ -14,7 +14,7 @@ var (
once sync.Once
)
// Config inclue all scene import config
// Config include all scene import config
type Config struct {
UserCases map[string]CaseConfig
}
......@@ -24,7 +24,7 @@ type CaseConfig struct {
Format string
FilePath string
Separator string
Stname string
StName string
SubTableName string
Timestamp string
TimestampType string
......