Commit 1517b69c authored by wmmhello

fix:error in schemaless

Too many changes to display: to preserve performance, only 1000 of 1000+ files are shown.
@@ -19,3 +19,6 @@
[submodule "tools/taosadapter"]
path = tools/taosadapter
url = https://github.com/taosdata/taosadapter.git
[submodule "tools/taosws-rs"]
path = tools/taosws-rs
url = https://github.com/taosdata/taosws-rs.git
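To actually fetch the newly registered submodule after checking out this commit, the standard git commands should suffice (a sketch; the path matches the entry above):

```bash
# Fetch only the new taosws-rs submodule registered above
git submodule update --init tools/taosws-rs
# Or initialize every submodule, including tools/taosadapter
git submodule update --init --recursive
```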
@@ -4,11 +4,6 @@ import jenkins.model.CauseOfInterruption
node {
}
-win_test_stage = 0
-linux_ready = 0
-linux_node_ip = ""
-linux_node_pass = ""
def abortPreviousBuilds() {
def currentJobName = env.JOB_NAME
def currentBuildNumber = env.BUILD_NUMBER.toInteger()
@@ -43,10 +38,8 @@ def pre_test(){
sh '''
cd ${WK}
git reset --hard
-git fetch || git fetch
cd ${WKC}
git reset --hard
-git fetch || git fetch
'''
script {
if (env.CHANGE_TARGET == 'master') {
@@ -82,9 +75,11 @@ def pre_test(){
if (env.CHANGE_URL =~ /\/TDengine\//) {
sh '''
cd ${WKC}
git remote prune origin
git pull >/dev/null
git log -5
echo "`date "+%Y%m%d-%H%M%S"` ${JOB_NAME}:${BRANCH_NAME}:${BUILD_ID}:${CHANGE_TARGET}" >>${WKDIR}/jenkins.log
echo "CHANGE_BRANCH:${CHANGE_BRANCH}" >>${WKDIR}/jenkins.log
echo "community log: `git log -5`" >>${WKDIR}/jenkins.log
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
@@ -101,12 +96,14 @@ def pre_test(){
git pull >/dev/null
git log -5
echo "`date "+%Y%m%d-%H%M%S"` ${JOB_NAME}:${BRANCH_NAME}:${BUILD_ID}:${CHANGE_TARGET}" >>${WKDIR}/jenkins.log
echo "CHANGE_BRANCH:${CHANGE_BRANCH}" >>${WKDIR}/jenkins.log
echo "tdinternal log: `git log -5`" >>${WKDIR}/jenkins.log
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
git log -5
echo "tdinternal log merged: `git log -5`" >>${WKDIR}/jenkins.log
cd ${WKC}
git remote prune origin
git pull >/dev/null
git log -5
echo "community log: `git log -5`" >>${WKDIR}/jenkins.log
@@ -130,60 +127,80 @@ def pre_test(){
'''
return 1
}
def pre_test_build_mac() {
sh '''
hostname
date
'''
sh '''
cd ${WK}
rm -rf debug
mkdir debug
'''
sh '''
cd ${WK}/debug
cmake ..
make -j8
'''
sh '''
date
'''
}
def pre_test_win(){
bat '''
hostname
taskkill /f /t /im python.exe
taskkill /f /t /im bash.exe
taskkill /f /t /im taosd.exe
ipconfig
set
date /t
time /t
-rd /s /Q C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\debug || exit 0
rd /s /Q %WIN_INTERNAL_ROOT%\\debug || exit 0
'''
bat '''
-cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
git reset --hard
-git fetch || git fetch
'''
bat '''
-cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git reset --hard
-git fetch || git fetch
'''
script {
if (env.CHANGE_TARGET == 'master') {
bat '''
-cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
git checkout master
'''
bat '''
-cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git checkout master
'''
} else if(env.CHANGE_TARGET == '2.0') {
bat '''
-cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
git checkout 2.0
'''
bat '''
-cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git checkout 2.0
'''
} else if(env.CHANGE_TARGET == '3.0') {
bat '''
-cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
git checkout 3.0
'''
bat '''
-cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git checkout 3.0
'''
} else {
bat '''
-cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
git checkout develop
'''
bat '''
-cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git checkout develop
'''
}
@@ -191,36 +208,38 @@ def pre_test_win(){
script {
if (env.CHANGE_URL =~ /\/TDengine\//) {
bat '''
-cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
git pull
'''
bat '''
-cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git remote prune origin
git pull
'''
bat '''
-cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git fetch origin +refs/pull/%CHANGE_ID%/merge
'''
bat '''
-cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git checkout -qf FETCH_HEAD
'''
} else if (env.CHANGE_URL =~ /\/TDinternal\//) {
bat '''
-cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
git pull
'''
bat '''
-cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
git fetch origin +refs/pull/%CHANGE_ID%/merge
'''
bat '''
-cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
git checkout -qf FETCH_HEAD
'''
bat '''
-cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git remote prune origin
git pull
'''
} else {
@@ -230,27 +249,27 @@ def pre_test_win(){
}
}
bat '''
-cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
git branch
git log -5
'''
bat '''
-cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git branch
git log -5
'''
bat '''
-cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git submodule update --init --recursive
'''
bat '''
-cd C:\\workspace\\%EXECUTOR_NUMBER%\\taos-connector-python
cd %WIN_CONNECTOR_ROOT%
git branch
git reset --hard
git pull
'''
bat '''
-cd C:\\workspace\\%EXECUTOR_NUMBER%\\taos-connector-python
cd %WIN_CONNECTOR_ROOT%
git log -5
'''
}
@@ -258,7 +277,7 @@ def pre_test_build_win() {
bat '''
echo "building ..."
time /t
-cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
mkdir debug
cd debug
time /t
@@ -273,9 +292,9 @@ def pre_test_build_win() {
time /t
'''
bat '''
-cd C:\\workspace\\%EXECUTOR_NUMBER%\\taos-connector-python
cd %WIN_CONNECTOR_ROOT%
python -m pip install .
-xcopy /e/y/i/f C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
'''
return 1
}
@@ -283,24 +302,22 @@ def run_win_ctest() {
bat '''
echo "windows ctest ..."
time /t
-cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\debug
cd %WIN_INTERNAL_ROOT%\\debug
ctest -j 1 || exit 7
time /t
'''
}
def run_win_test() {
-echo "LINUX NODE: ${linux_node_ip} - ${linux_node_pass}"
bat '''
echo "windows test ..."
-cd C:\\workspace\\%EXECUTOR_NUMBER%\\taos-connector-python
cd %WIN_CONNECTOR_ROOT%
python -m pip install .
-xcopy /e/y/i/f C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
ls -l C:\\Windows\\System32\\taos.dll
time /t
-cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community\\tests\\system-test
cd %WIN_SYSTEM_TEST_ROOT%
-echo "node: ''' + linux_node_ip + ''':''' + linux_node_pass + '''"
echo "testing ..."
-test-all.bat "{\\\"host\\\":\\\"''' + linux_node_ip + '''\\\",\\\"port\\\":22,\\\"user\\\":\\\"root\\\",\\\"password\\\":\\\"''' + linux_node_pass + '''\\\",\\\"path\\\":\\\"/var/lib/jenkins/workspace/TDinternal\\\"}"
test-all.bat ci
time /t
'''
}
@@ -319,22 +336,31 @@ pipeline {
parallel {
stage('windows test') {
agent{label " windows10_01 || windows10_02 || windows10_03 || windows10_04 "}
environment{
WIN_INTERNAL_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\TDinternal"
WIN_COMMUNITY_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\TDinternal\\community"
WIN_SYSTEM_TEST_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\TDinternal\\community\\tests\\system-test"
WIN_CONNECTOR_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\taos-connector-python"
}
steps {
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
timeout(time: 55, unit: 'MINUTES'){
pre_test_win()
pre_test_build_win()
run_win_ctest()
-script {
-while(linux_ready == 0) {
-sleep(8)
-}
-}
run_win_test()
}
}
-script {
-win_test_stage = 1
}
}
stage('mac test') {
agent{label " Mac_catalina "}
steps {
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
timeout(time: 20, unit: 'MINUTES'){
pre_test()
pre_test_build_mac()
}
}
}
}
@@ -346,18 +372,15 @@ pipeline {
}
steps {
script {
-linux_node_ip = sh (
def linux_node_ip = sh (
-script: 'jq .ip /home/node_info.json | sed "s/\\\"//g"',
script: 'ip addr|grep 192|grep -v virbr|awk "{print \\\$2}"|sed "s/\\/.*//"',
-returnStdout: true
-).trim()
-linux_node_pass = sh (
-script: 'jq .password /home/node_info.json | sed "s/\\\"//g" |sed "s/\\!/^^^^^^^^\\!/g"',
returnStdout: true
).trim()
-echo "${linux_node_ip}:${linux_node_pass}"
echo "${linux_node_ip}"
echo "${WKDIR}/restore.sh -p ${BRANCH_NAME} -n ${BUILD_ID} -c {container name}"
}
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
-timeout(time: 40, unit: 'MINUTES'){
timeout(time: 120, unit: 'MINUTES'){
pre_test()
script {
sh '''
@@ -368,11 +391,36 @@ pipeline {
rm -f /tmp/cases.task
./collect_cases.sh -e
'''
def extra_param = ""
def log_server_file = "/home/log_server.json"
def timeout_cmd = ""
if (fileExists(log_server_file)) {
def log_server_enabled = sh (
script: 'jq .enabled ' + log_server_file,
returnStdout: true
).trim()
def timeout_param = sh (
script: 'jq .timeout ' + log_server_file,
returnStdout: true
).trim()
if (timeout_param != "null" && timeout_param != "0") {
timeout_cmd = "timeout " + timeout_param
}
if (log_server_enabled == "1") {
def log_server = sh (
script: 'jq .server ' + log_server_file + ' | sed "s/\\\"//g"',
returnStdout: true
).trim()
if (log_server != "null" && log_server != "") {
extra_param = "-w " + log_server
}
}
}
sh '''
cd ${WKC}/tests/parallel_test
export DEFAULT_RETRY_TIME=2
date
-timeout 2100 time ./run.sh -e -m /home/m.json -t /tmp/cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 480
''' + timeout_cmd + ''' time ./run.sh -e -m /home/m.json -t /tmp/cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 480 ''' + extra_param + '''
'''
}
}
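For illustration only, here is a hypothetical `/home/log_server.json` that the new logic above would accept. The field names (`.enabled`, `.timeout`, `.server`) are exactly the jq lookups in the pipeline; the values are invented. `.timeout` is spliced in front of run.sh as a coreutils `timeout` prefix, and `.server` is passed along as `-w <server>`:

```bash
# Hypothetical example config; field names match the jq queries above.
# .timeout must be a bare JSON number: jq would emit surrounding quotes
# for a JSON string, which would break the generated "timeout <n>" prefix.
cat > /home/log_server.json <<'EOF'
{
  "enabled": 1,
  "timeout": 2100,
  "server": "192.168.1.100"
}
EOF
```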
@@ -387,38 +435,9 @@ pipeline {
cd ${WKC}/packaging
./release.sh -v cluster -n 3.0.0.100 -s static
'''
-sh '''
-echo "install ..."
-cd ${WKC}/release
-tar xzf TDengine-enterprise-server-3.0.0.100-Linux-x64.tar.gz
-cd TDengine-enterprise-server-3.0.0.100
-service taosd stop || :
-rm -rf /var/lib/taos
-./install.sh -e no
-'''
-sh '''
-echo "checking ..."
-which taos
-which taosd
-rm -rf ${WK}/debug
-mv ${WKC}/debug ${WK}/
-'''
-sh '''
-echo "install taospy ..."
-cd ${WKPY}
-pip3 install .
-'''
}
}
}
-script {
-linux_ready = 1
-}
-script {
-while(win_test_stage == 0){
-sleep(12)
-}
-}
}
}
}
......
<p>
<p align="center">
<a href="https://tdengine.com" target="_blank">
<img
src="docs/assets/tdengine.svg"
alt="TDengine"
width="500"
/>
</a>
</p>
<p>
[![Build Status](https://travis-ci.org/taosdata/TDengine.svg?branch=master)](https://travis-ci.org/taosdata/TDengine)
[![Build status](https://ci.appveyor.com/api/projects/status/kf3pwh2or5afsgl9/branch/master?svg=true)](https://ci.appveyor.com/project/sangshuduo/tdengine-2n8ge/branch/master)
[![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=develop)](https://coveralls.io/github/taosdata/TDengine?branch=develop)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4201/badge)](https://bestpractices.coreinfrastructure.org/projects/4201)
[![tdengine](https://snapcraft.io//tdengine/badge.svg)](https://snapcraft.io/tdengine)
-[![TDengine](TDenginelogo.png)](https://www.taosdata.com)
-简体中文 | [English](./README.md)
简体中文 | [English](README.md) | We are hiring, see [here](https://www.taosdata.com/cn/careers/)
# TDengine Introduction
-TDengine is a big data platform designed and optimized by TAOS Data for IoT, Connected Vehicles, Industrial IoT, IT operations and more. Besides its core time-series database, which is more than 10x faster, it provides caching, data subscription, stream computing and other functions to minimize the complexity of development and operations, and its core code, including the cluster feature, is fully open source (licensed under AGPL v3.0).
TDengine is a high-performance, distributed, SQL-supporting time-series database. Beyond the time-series database itself, it provides caching, data subscription, stream computing and other functions to minimize the complexity of development and operations, and its core code, including the cluster feature, is fully open source (licensed under AGPL v3.0). Compared with other time-series databases, TDengine has the following advantages:
- **High Performance**: With an innovatively designed storage engine, TDengine is more than 10x faster than general-purpose databases for both data ingestion and queries, and also far faster than other time-series databases, while saving considerable storage space.
- **Distributed**: With its native distributed design, TDengine offers horizontal scalability; simply adding nodes yields greater data processing power, while the multi-replica mechanism ensures high availability.
- **SQL Support**: TDengine uses SQL as its query language, reducing learning and migration costs, provides SQL extensions for time-series-specific analysis, and supports convenient and flexible schemaless data ingestion.
- **All in One**: TDengine integrates database, message queue, caching and stream computing into one product, so applications no longer need to integrate Kafka/Redis/HBase/Spark, greatly reducing development and maintenance costs.
- **Zero Management**: Installation and clustering take seconds, with no dependencies and no manual sharding; the system's running status can be monitored seamlessly with Grafana or other DevOps tools.
- **Zero Learning Cost**: With SQL as the query language and support for Python, Java, C/C++, Go, Rust, Node.js and other languages, it is similar to MySQL, so there is zero learning cost.
- **Seamless Integration**: Without a single line of code, TDengine integrates with Telegraf, Grafana, EMQX, Prometheus, StatsD, collectd, Matlab, R and other third-party tools.
-- More than 10x performance boost. With an innovative data storage structure, a single core can handle at least 20K requests per second, insert millions of data points, and read more than 10 million data points, more than 10x faster than existing general-purpose databases.
- **Interactive Console**: Through the command-line console, you can run SQL statements to perform ad hoc queries, database operations, administration and cluster maintenance without any programming.
-- Hardware or cloud service costs reduced to 1/5. Thanks to its performance, required computing resources are less than 1/5 of typical big data solutions; with columnar storage and advanced compression, storage space is less than 1/10 of general-purpose databases.
-- A full-stack time-series data processing engine. Database, message queue, caching and stream computing are fused together, so applications no longer need to integrate Kafka/Redis/HBase/Spark, greatly reducing development and maintenance costs.
TDengine can be widely applied to IoT, Industrial Internet, Connected Vehicles, IT operations, energy, finance and other fields, allowing the TB- or even PB-scale data generated daily by massive numbers of devices and data collectors to be processed efficiently in real time, monitoring and alerting on business status in real time and mining business value from big data.
-- Powerful analytics. Data from ten years ago or one second ago can be queried by simply specifying the time range. Data can be aggregated over time or across devices. Ad hoc queries can be run at any time via Shell/Python/R/Matlab.
-- Seamless connection with third-party tools. Without a single line of code, it integrates with Telegraf, Grafana, EMQ X, Prometheus, Matlab, R and more. MQTT, OPC, Hadoop, Spark and others will be supported later, and BI tools will connect seamlessly as well.
-- Zero operations cost, zero learning cost. Installation and clustering take seconds, with no manual sharding and real-time backup. Standard SQL with JDBC, RESTful, and Python/Java/C/C++/Go/Node.JS support, similar to MySQL, with zero learning cost.
# Documentation
-TDengine is an efficient platform for storing, querying and analyzing time-series big data, designed and optimized for IoT, Connected Vehicles, Industrial IoT, operations monitoring and more. You can use it just like the relational database MySQL, but we recommend carefully reading the documentation below before use, especially [Data Model](https://www.taosdata.com/cn/documentation/architecture) and [Data Modeling](https://www.taosdata.com/cn/documentation/model). Besides this documentation, you are welcome to [download the product white paper](https://www.taosdata.com/downloads/TDengine%20White%20Paper.pdf).
TDengine adopts the traditional relational database model, so you can use it just like the relational database MySQL. However, since it introduces the concepts of super tables and one table per data collection point, we recommend carefully reading the documentation below before use, especially [Data Model](https://www.taosdata.com/cn/documentation/architecture) and [Data Modeling](https://www.taosdata.com/cn/documentation/model). Besides this documentation, you are welcome to [download the product white paper](https://www.taosdata.com/downloads/TDengine%20White%20Paper.pdf).
# Building
Currently the TDengine 2.0 server can only be installed and run on Linux; Windows, macOS and other systems will be supported later. The client can be installed and run on Windows or Linux. Applications on any OS can also connect to the server taosd through the RESTful interface. Supported CPUs are X64/ARM64/MIPS64/Alpha64, with ARM32, RISC-V and other architectures to follow. Users can choose to install from [source code](https://www.taosdata.com/cn/getting-started/#通过源码安装) or from [packages](https://www.taosdata.com/cn/getting-started/#通过安装包安装) as needed. This quick guide only applies to installing from source.
## Install tools
### Ubuntu 16.04 and above & Debian:
```bash
-sudo apt-get install -y gcc cmake build-essential git
sudo apt-get install -y gcc cmake build-essential git libssl-dev
```
### Ubuntu 14.04:
@@ -56,10 +77,22 @@ sudo apt-get install -y openjdk-8-jdk
sudo apt-get install -y maven
```
#### Install build prerequisites for taos-tools
taosTools is a collection of auxiliary tools for TDengine. Currently it contains two tools, taosBenchmark (formerly named taosdemo) and taosdump.
By default, building TDengine does not include taosTools. You can use `cmake .. -DBUILD_TOOLS=true` when building TDengine to compile taosTools at the same time.
To build [taos-tools](https://github.com/taosdata/taos-tools) on Ubuntu/Debian, the following packages need to be installed.
```bash
sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-dev pkg-config
```
### CentOS 7:
```bash
-sudo yum install -y gcc gcc-c++ make cmake git
sudo yum install -y gcc gcc-c++ make cmake git openssl-devel
```
Install OpenJDK 8:
@@ -74,10 +107,10 @@ sudo yum install -y java-1.8.0-openjdk
sudo yum install -y maven
```
-### CentOS 8 & Fedora:
### CentOS 8 & Fedora
```bash
-sudo dnf install -y gcc gcc-c++ make cmake epel-release git
sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
```
Install OpenJDK 8:
@@ -92,6 +125,33 @@ sudo dnf install -y java-1.8.0-openjdk
sudo dnf install -y maven
```
#### Install build dependencies for taosTools on CentOS
To build [taosTools](https://github.com/taosdata/taos-tools) on CentOS, the following dependencies need to be installed.
```bash
sudo yum install zlib-devel xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libstdc++-static openssl-devel
```
Note: since snappy lacks pkg-config support (see this [link](https://github.com/google/snappy/pull/86)), cmake reports that it cannot find libsnappy, but everything actually works fine.
### Set up the golang development environment
TDengine includes several components developed in Go. Please refer to the official golang.org documentation to set up the Go development environment.
Please use Go 1.14 or above. For users in China, we recommend using a proxy to accelerate package downloads.
```
go env -w GO111MODULE=on
go env -w GOPROXY=https://goproxy.cn,direct
```
### Set up the rust development environment
TDengine includes several components developed in Rust. Please refer to the official rust-lang.org documentation to set up the Rust development environment.
## Get the source code
First, you need to clone the source code from GitHub:
@@ -107,22 +167,41 @@ The Go connector and Grafana plugin are in separate repositories; to install them,
git submodule update --init --recursive
```
If downloading over the https protocol is slow, you can switch to the ssh protocol by adding the following two lines to ~/.gitconfig. You need to upload your ssh key to GitHub first; see the official GitHub documentation for details.
```
[url "git@github.com:"]
insteadOf = https://github.com/
```
## Build TDengine
### On Linux platform
You can run the `build.sh` script in the repository to build both TDengine and taosTools (including taosBenchmark and taosdump).
```bash
-mkdir debug && cd debug
./build.sh
-cmake .. && cmake --build .
```
-You can choose to use Jemalloc as the memory allocator instead of the default glibc:
This script is equivalent to executing the following commands:
```bash
git submodule update --init --recursive
mkdir debug
cd debug
cmake .. -DBUILD_TOOLS=true
make
```
You can also choose to use jemalloc as the memory allocator instead of the default glibc:
```bash
apt install autoconf
cmake .. -DJEMALLOC_ENABLED=true
```
On X86-64, X86, arm64, arm32 and mips64 platforms, the TDengine build script detects the host machine's architecture automatically. You can also set the CPUTYPE parameter manually to specify the CPU type, such as aarch64 or aarch32.
aarch64:
@@ -157,7 +236,7 @@ nmake
If you are using Visual Studio 2019 or 2017:
Open cmd.exe and, when executing vcvarsall.bat, specify "x64" for a 64-bit OS or "x86" for a 32-bit OS.
```bash
mkdir debug && cd debug
@@ -174,7 +253,7 @@ cmake .. -G "NMake Makefiles"
nmake
```
-### On Mac OS X platform
### On macOS platform
Install the Xcode command line tools and cmake. XCode 11.4+ is required on Catalina and Big Sur.
@@ -185,13 +264,17 @@ cmake .. && cmake --build .
# Installing
-After the build completes, install TDengine (the commands below use Linux as an example; on Windows the corresponding command would be `nmake install`):
## On Linux platform
After the build completes, install TDengine:
```bash
sudo make install
```
Users can learn more about the directories and files generated on the system in [Directory Structure](https://www.taosdata.com/cn/documentation/administrator#directories).
Since version 2.0, installing from source also sets up service management for TDengine.
Users can also choose to [install from packages](https://www.taosdata.com/en/getting-started/#Install-from-Package).
After a successful installation, start the TDengine service in a terminal:
@@ -207,6 +290,40 @@ taos
If the TDengine shell connects to the service successfully, it prints a welcome message and version information; otherwise, it prints an error message.
## On Windows platform
After the build completes, install TDengine:
```cmd
nmake install
```
## On macOS platform
After the build completes, install TDengine:
```bash
sudo make install
```
After a successful installation, if you want to start TDengine as a service, first configure the `.plist` file by running in a terminal:
```bash
sudo cp ../packaging/macOS/com.taosdata.tdengine.plist /Library/LaunchDaemons
```
To start the TDengine service, run in a terminal:
```bash
sudo launchctl load /Library/LaunchDaemons/com.taosdata.tdengine.plist
```
To stop the TDengine service, run in a terminal:
```bash
sudo launchctl unload /Library/LaunchDaemons/com.taosdata.tdengine.plist
```
## Quick Run
If you don't want to run TDengine as a service, you can also run it directly in a terminal: after the build completes, execute the following command (on Windows the generated executables carry the .exe suffix, e.g. taosd.exe):
@@ -225,15 +342,15 @@ taos
# Try TDengine
In the TDengine shell, you can create and drop databases and tables and run insert and query operations via SQL commands.
-```bash
```sql
-create database demo;
CREATE DATABASE demo;
-use demo;
USE demo;
-create table t (ts timestamp, speed int);
CREATE TABLE t (ts TIMESTAMP, speed INT);
-insert into t values ('2019-07-15 00:00:00', 10);
INSERT INTO t VALUES('2019-07-15 00:00:00', 10);
-insert into t values ('2019-07-15 01:00:00', 20);
INSERT INTO t VALUES('2019-07-15 01:00:00', 20);
-select * from t;
SELECT * FROM t;
ts | speed |
===================================
19-07-15 00:00:00.000| 10|
@@ -245,33 +362,35 @@ Query OK, 2 row(s) in set (0.001700s)
## Official Connectors
TDengine provides a rich set of application development interfaces, including C/C++, Java, Python, Go, Node.js, C#, RESTful and more, to facilitate rapid application development:
- [Java](https://www.taosdata.com/cn/documentation/connector/java)
-- Java
- [C/C++](https://www.taosdata.com/cn/documentation/connector#c-cpp)
-- C/C++
- [Python](https://www.taosdata.com/cn/documentation/connector#python)
-- Python
- [Go](https://www.taosdata.com/cn/documentation/connector#go)
-- Go
- [RESTful API](https://www.taosdata.com/cn/documentation/connector#restful)
-- RESTful API
- [Node.js](https://www.taosdata.com/cn/documentation/connector#nodejs)
-- Node.js
- [Rust](https://www.taosdata.com/cn/documentation/connector/rust)
## Third-Party Connectors
The TDengine community ecosystem also offers some very friendly third-party connectors; their source code is available via the links below.
-- [Rust Connector](https://github.com/taosdata/TDengine/tree/master/tests/examples/rust)
- [Rust Bindings](https://github.com/songtianyi/tdengine-rust-bindings/tree/master/examples)
- [.Net Core Connector](https://github.com/maikebing/Maikebing.EntityFrameworkCore.Taos)
-- [Lua Connector](https://github.com/taosdata/TDengine/tree/develop/tests/examples/lua)
- [Lua Connector](https://github.com/taosdata/TDengine/tree/develop/examples/lua)
# Run and add test cases
TDengine's test framework and all test cases are fully open source.
-Click [here](tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md) to learn how to run tests and add new test cases.
Click [here](https://github.com/taosdata/TDengine/blob/develop/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md) to learn how to run tests and add new test cases.
# Become a community contributor
@@ -279,8 +398,8 @@ TDengine's test framework and all test cases are fully open source.
# Join the technical discussion group
TDengine's official "IoT Big Data" community group is open; you are welcome to join the discussion. Search for the WeChat ID "tdengine" and add Little T as a friend to join the group.
# [Who is using TDengine](https://github.com/taosdata/TDengine/issues/2432)
All TDengine users and contributors are welcome to share their stories of developing or using TDengine in their work [here](https://github.com/taosdata/TDengine/issues/2432).
<p>
<p align="center">
<a href="https://tdengine.com" target="_blank">
<img
src="docs/assets/tdengine.svg"
alt="TDengine"
width="500"
/>
</a>
</p>
<p>
[![Build Status](https://cloud.drone.io/api/badges/taosdata/TDengine/status.svg?ref=refs/heads/master)](https://cloud.drone.io/taosdata/TDengine)
[![Build status](https://ci.appveyor.com/api/projects/status/kf3pwh2or5afsgl9/branch/master?svg=true)](https://ci.appveyor.com/project/sangshuduo/tdengine-2n8ge/branch/master)
[![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=develop)](https://coveralls.io/github/taosdata/TDengine?branch=develop)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4201/badge)](https://bestpractices.coreinfrastructure.org/projects/4201)
[![tdengine](https://snapcraft.io//tdengine/badge.svg)](https://snapcraft.io/tdengine)
-[![TDengine](TDenginelogo.png)](https://www.taosdata.com)
-English | [简体中文](./README-CN.md)
English | [简体中文](README-CN.md) | We are hiring, check [here](https://tdengine.com/careers)
# What is TDengine?
-TDengine is an open-sourced big data platform under [GNU AGPL v3.0](http://www.gnu.org/licenses/agpl-3.0.html), designed and optimized for the Internet of Things (IoT), Connected Cars, Industrial IoT, and IT Infrastructure and Application Monitoring. Besides the 10x faster time-series database, it provides caching, stream computing, message queuing and other functionalities to reduce the complexity and cost of development and operation.
TDengine is a high-performance, scalable time-series database with SQL support. Its code, including the cluster feature, is open source under [GNU AGPL v3.0](http://www.gnu.org/licenses/agpl-3.0.html). Besides the database, it provides caching, stream processing, data subscription and other functionalities to reduce the complexity and cost of development and operation. TDengine differentiates itself from other TSDBs with the following advantages.
- **High Performance**: TDengine outperforms other time-series databases in data ingestion and querying while significantly reducing storage and compute costs, with an innovatively designed and purpose-built storage engine.
- **Scalable**: TDengine provides out-of-the-box scalability and high availability through its native distributed design. Nodes can be added through simple configuration to achieve greater data processing power. In addition, this feature is open source.
-- **10x Faster on Insert/Query Speeds**: Through the innovative design on storage, on a single-core machine, over 20K requests can be processed, millions of data points can be ingested, and over 10 million data points can be retrieved in a second. It is 10 times faster than other databases.
- **SQL Support**: TDengine uses SQL as the query language, thereby reducing learning and migration costs, while adding SQL extensions to handle time-series data better, and supporting convenient and flexible schemaless data ingestion.
-- **1/5 Hardware/Cloud Service Costs**: Compared with typical big data solutions, less than 1/5 of computing resources are required. Via column-based storage and tuned compression algorithms for different data types, less than 1/10 of storage space is needed.
- **All in One**: TDengine has built-in caching, stream processing and data subscription functions, so it is no longer necessary to integrate Kafka/Redis/HBase/Spark or other software in some scenarios. It makes the system architecture much simpler and easier to maintain.
-- **Full Stack for Time-Series Data**: By integrating a database with message queuing, caching, and stream computing features together, it is no longer necessary to integrate Kafka/Redis/HBase/Spark or other software. It makes the system architecture much simpler and more robust.
- **Seamless Integration**: Without a single line of code, TDengine provides seamless integration with third-party tools such as Telegraf, Grafana, EMQX, Prometheus, StatsD, collectd, etc. More will be integrated.
-- **Powerful Data Analysis**: Whether it is 10 years or one minute ago, data can be queried just by specifying the time range. Data can be aggregated over time, multiple time streams or both. Ad hoc queries or analyses can be executed via TDengine shell, Python, R or Matlab.
- **Zero Management**: Installation and cluster setup can be done in seconds. Data partitioning and sharding are executed automatically. TDengine's running status can be monitored via Grafana or other DevOps tools.
-- **Seamless Integration with Other Tools**: Telegraf, Grafana, Matlab, R, and other tools can be integrated with TDengine without a line of code. MQTT, OPC, Hadoop, Spark, and many others will be integrated soon.
- **Zero Learning Cost**: With SQL as the query language and support for ubiquitous tools like Python, Java, C/C++, Go, Rust, and Node.js connectors, there is zero learning cost.
-- **Zero Management, No Learning Curve**: It takes only seconds to download, install, and run it successfully; there are no other dependencies. Automatic partitioning on tables or DBs. Standard SQL is used, with C/C++, Python, JDBC, Go and RESTful connectors.
- **Interactive Console**: TDengine provides convenient console access to the database to run ad hoc queries, maintain the database, or manage the cluster without any programming.
TDengine can be widely applied to Internet of Things (IoT), Connected Vehicles, Industrial IoT, DevOps, energy, finance and many other scenarios.
# Documentation
For user manual, system design and architecture, and engineering blogs, refer to [TDengine Documentation](https://www.taosdata.com/en/documentation/) (for the Chinese version, click [here](https://www.taosdata.com/cn/documentation20/))
-for details. The documentation from our website can also be downloaded locally from *documentation/tdenginedocs-en* or *documentation/tdenginedocs-cn*.
for details. The documentation from our website can also be downloaded locally from _documentation/tdenginedocs-en_ or _documentation/tdenginedocs-cn_.
# Building
-At the moment, TDengine only supports building and running on Linux systems. You can choose to [install from packages](https://www.taosdata.com/en/getting-started/#Install-from-Package) or from the source code. This quick guide is for installation from the source only.
-To build TDengine, use [CMake](https://cmake.org/) 2.8.12.x or higher versions in the project directory.
At the moment, the TDengine server only supports running on Linux systems. You can choose to [install from packages](https://www.taosdata.com/en/getting-started/#Install-from-Package) or build it from the source code. This quick guide is for installation from the source only.
To build TDengine, use [CMake](https://cmake.org/) 3.0.2 or higher versions in the project directory.
-## Install tools
## Install build dependencies
### Ubuntu 16.04 and above or Debian
-### Ubuntu 16.04 and above & Debian:
```bash
-sudo apt-get install -y gcc cmake build-essential git
sudo apt-get install -y gcc cmake build-essential git libssl-dev
```
-### Ubuntu 14.04:
### Ubuntu 14.04
```bash
sudo apt-get install -y gcc cmake3 build-essential git binutils-2.26
export PATH=/usr/lib/binutils-2.26/bin:$PATH
```
To compile and package the JDBC driver source code, you should have a Java jdk-8 or higher and Apache Maven 2.7 or higher installed.
To install openjdk-8:
```bash
sudo apt-get install -y openjdk-8-jdk
```
To install Apache Maven:
```bash
sudo apt-get install -y maven
```
-### Centos 7:
#### Install build dependencies for taosTools
We provide a few useful tools such as taosBenchmark (formerly named taosdemo) and taosdump. They used to be part of TDengine. From TDengine 2.4.0.0, taosBenchmark and taosdump are no longer released together with TDengine.
By default, TDengine compiling does not include taosTools. You can use `cmake .. -DBUILD_TOOLS=true` to have them compiled with TDengine.
To build [taosTools](https://github.com/taosdata/taos-tools) on Ubuntu/Debian, the following packages need to be installed.
```bash
-sudo yum install -y gcc gcc-c++ make cmake git
sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-dev pkg-config
```
### CentOS 7
```bash
sudo yum install epel-release
sudo yum update
sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel
sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake
``` ```
To install openjdk-8:
```bash
sudo yum install -y java-1.8.0-openjdk
```
To install Apache Maven:
```bash
sudo yum install -y maven
```
-### Centos 8 & Fedora:
### CentOS 8 & Fedora
```bash
-sudo dnf install -y gcc gcc-c++ make cmake epel-release git
sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
```
To install openjdk-8:
```bash
sudo dnf install -y java-1.8.0-openjdk
```
To install Apache Maven:
```bash
sudo dnf install -y maven
```
#### Install build dependencies for taosTools on CentOS
To build the [taosTools](https://github.com/taosdata/taos-tools) on CentOS, the following packages need to be installed.
```bash
sudo yum install zlib-devel xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libstdc++-static openssl-devel
```
Note: Since snappy lacks pkg-config support (refer to [link](https://github.com/google/snappy/pull/86)), cmake reports that it cannot find libsnappy, but snappy actually works well.
### Setup golang environment
TDengine includes a few components developed in the Go language. Please refer to the official golang.org documentation for golang environment setup.
Please use version 1.14+. For users in China, we recommend using a proxy to accelerate package downloading.
```
go env -w GO111MODULE=on
go env -w GOPROXY=https://goproxy.cn,direct
```
### Setup rust environment
TDengine includes a few components developed in the Rust language. Please refer to the official rust-lang.org documentation for rust environment setup.
## Get the source codes
First of all, you may clone the source codes from GitHub:
```bash
git clone https://github.com/taosdata/TDengine.git
cd TDengine
```
-The connectors for go & grafana have been moved to separate repositories,
The connectors for go & Grafana and some tools have been moved to separate repositories,
so you should run this command in the TDengine directory to install them:
```bash
git submodule update --init --recursive
```
You can modify the file ~/.gitconfig to use the ssh protocol instead of https for better download speed. You need to upload your ssh public key to GitHub first. Please refer to the official GitHub documentation for details.
```
[url "git@github.com:"]
insteadOf = https://github.com/
```
## Build TDengine
### On Linux platform
You can run the bash script `build.sh` to build both TDengine and taosTools, including taosBenchmark and taosdump, as below:
```bash
-mkdir debug && cd debug
./build.sh
-cmake .. && cmake --build .
```
It is equivalent to executing the following commands:
```bash
git submodule update --init --recursive
mkdir debug
cd debug
cmake .. -DBUILD_TOOLS=true
make
```
Note: TDengine 2.3.x.0 and later use a component named taosAdapter to play the http daemon role by default, instead of the http daemon embedded in earlier versions of TDengine. taosAdapter is written in the Go language. If you pull the latest TDengine source code into an existing codebase, please execute `git submodule update --init --recursive` to pull the taosAdapter source code. Please install Go language version 1.14 or above for compiling taosAdapter. If you meet difficulties regarding 'go mod', especially if you are in China, you can use a proxy to solve the problem.
```
go env -w GO111MODULE=on
go env -w GOPROXY=https://goproxy.cn,direct
```
The embedded http daemon is still built from the TDengine source code by default. Alternatively, you can use the following command to choose to build taosAdapter:
```
cmake .. -DBUILD_HTTP=false
``` ```
You can use Jemalloc as memory allocator instead of glibc:
```
apt install autoconf
cmake .. -DJEMALLOC_ENABLED=true
@@ -120,16 +222,19 @@ TDengine build script can detect the host machine's architecture on X86-64, X86,
You can also specify CPUTYPE option like aarch64 or aarch32 too if the detection result is not correct:
aarch64:
```bash
cmake .. -DCPUTYPE=aarch64 && cmake --build .
```
aarch32:
```bash
cmake .. -DCPUTYPE=aarch32 && cmake --build .
```
mips64:
```bash
cmake .. -DCPUTYPE=mips64 && cmake --build .
```
@@ -138,6 +243,7 @@ cmake .. -DCPUTYPE=mips64 && cmake --build .
If you use the Visual Studio 2013, please open a command window by executing "cmd.exe".
Please specify "amd64" for 64-bit Windows or specify "x86" for 32-bit Windows when you execute vcvarsall.bat.
```cmd
mkdir debug && cd debug
"C:\\Program Files (x86)\\Microsoft Visual Studio 12.0\\VC\\vcvarsall.bat" < amd64 | x86 >
@@ -158,13 +264,14 @@ nmake
```
Or, you can simply open a command window by clicking Windows Start -> "Visual Studio < 2019 | 2017 >" folder -> "x64 Native Tools Command Prompt for VS < 2019 | 2017 >" or "x86 Native Tools Command Prompt for VS < 2019 | 2017 >" depending on what architecture your Windows is, then execute commands as follows:
```cmd
mkdir debug && cd debug
cmake .. -G "NMake Makefiles"
nmake
```
-### On Mac OS X platform
### On macOS platform
Please install XCode command line tools and cmake. Verified with XCode 11.4+ on Catalina and Big Sur.
@@ -175,7 +282,10 @@ cmake .. && cmake --build .
# Installing
-After building successfully, TDengine can be installed by: (On Windows platform, the following command should be `nmake install`)
## On Linux platform
After building successfully, TDengine can be installed by:
```bash
sudo make install
```
@@ -184,68 +294,129 @@ Users can find more information about directories installed on the system in the
Users can also choose to [install from packages](https://www.taosdata.com/en/getting-started/#Install-from-Package) for it.
To start the service after installation, in a terminal, use:
```bash
sudo systemctl start taosd
```
Then users can use the [TDengine shell](https://www.taosdata.com/en/getting-started/#TDengine-Shell) to connect the TDengine server. In a terminal, use:
```bash
taos
```
If TDengine shell connects the server successfully, welcome messages and version info are printed. Otherwise, an error message is shown.
### Install TDengine by apt-get
If you use a Debian or Ubuntu system, you can use the 'apt-get' command to install TDengine from the official repository. Please use the following commands to set it up:
```
wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add -
echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-stable stable main" | sudo tee /etc/apt/sources.list.d/tdengine-stable.list
[Optional] echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-beta beta main" | sudo tee /etc/apt/sources.list.d/tdengine-beta.list
sudo apt-get update
apt-cache policy tdengine
sudo apt-get install tdengine
```
## On Windows platform
After building successfully, TDengine can be installed by:
```cmd
nmake install
```
## On macOS platform
After building successfully, TDengine can be installed by:
```bash
sudo make install
```
To start the service after installation, configure the `.plist` file first. In a terminal, use:
```bash
sudo cp ../packaging/macOS/com.taosdata.tdengine.plist /Library/LaunchDaemons
```
To start the service, in a terminal, use:
```bash
sudo launchctl load /Library/LaunchDaemons/com.taosdata.tdengine.plist
```
To stop the service, in a terminal, use:
```bash
sudo launchctl unload /Library/LaunchDaemons/com.taosdata.tdengine.plist
```
## Quick Run
If you don't want to run TDengine as a service, you can run it in the current shell. For example, to quickly start a TDengine server after building, run the command below in a terminal (we take Linux as an example; the command on Windows will be `taosd.exe`):
```bash
./build/bin/taosd -c test/cfg
```
In another terminal, use the TDengine shell to connect the server:
```bash
./build/bin/taos -c test/cfg
```
The option "-c test/cfg" specifies the system configuration file directory.
# Try TDengine
It is easy to run SQL commands from the TDengine shell, which is the same as other SQL databases.
```sql
-create database db;
CREATE DATABASE demo;
-use db;
USE demo;
-create table t (ts timestamp, a int);
CREATE TABLE t (ts TIMESTAMP, speed INT);
-insert into t values ('2019-07-15 00:00:00', 1);
INSERT INTO t VALUES('2019-07-15 00:00:00', 10);
-insert into t values ('2019-07-15 01:00:00', 2);
INSERT INTO t VALUES('2019-07-15 01:00:00', 20);
-select * from t;
SELECT * FROM t;
-drop database db;
ts | speed |
===================================
19-07-15 00:00:00.000| 10|
19-07-15 01:00:00.000| 20|
Query OK, 2 row(s) in set (0.001700s)
```
# Developing with TDengine
-### Official Connectors
## Official Connectors
TDengine provides abundant developing tools for users to develop on TDengine. Follow the links below to find your desired connectors and relevant documentation.
-- [Java](https://www.taosdata.com/en/documentation/connector/#Java-Connector)
- [Java](https://www.taosdata.com/en/documentation/connector/java)
-- [C/C++](https://www.taosdata.com/en/documentation/connector/#C/C++-Connector)
- [C/C++](https://www.taosdata.com/en/documentation/connector#c-cpp)
-- [Python](https://www.taosdata.com/en/documentation/connector/#Python-Connector)
- [Python](https://www.taosdata.com/en/documentation/connector#python)
-- [Go](https://www.taosdata.com/en/documentation/connector/#Go-Connector)
- [Go](https://www.taosdata.com/en/documentation/connector#go)
-- [RESTful API](https://www.taosdata.com/en/documentation/connector/#RESTful-Connector)
- [RESTful API](https://www.taosdata.com/en/documentation/connector#restful)
-- [Node.js](https://www.taosdata.com/en/documentation/connector/#Node.js-Connector)
- [Node.js](https://www.taosdata.com/en/documentation/connector#nodejs)
- [Rust](https://www.taosdata.com/en/documentation/connector/rust)
-### Third Party Connectors
## Third Party Connectors
The TDengine community has also kindly built some of their own connectors! Follow the links below to find the source code for them.
-- [Rust Connector](https://github.com/taosdata/TDengine/tree/master/tests/examples/rust)
- [Rust Bindings](https://github.com/songtianyi/tdengine-rust-bindings/tree/master/examples)
- [.Net Core Connector](https://github.com/maikebing/Maikebing.EntityFrameworkCore.Taos)
- [Lua Connector](https://github.com/taosdata/TDengine/tree/develop/tests/examples/lua)
-# How to run the test cases and how to add a new test case?
# How to run the test cases and how to add a new test case
-TDengine's test framework and all test cases are fully open source.
-Please refer to [this document](tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md) for how to run tests and develop new test cases.
TDengine's test framework and all test cases are fully open source.
Please refer to [this document](https://github.com/taosdata/TDengine/blob/develop/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md) for how to run tests and develop new test cases.
# TDengine Roadmap
- Support event-driven stream computing
- Support user defined functions
- Support MQTT connection
......
@@ -18,6 +18,14 @@ if (NOT DEFINED TD_GRANT)
SET(TD_GRANT FALSE)
endif()
IF ("${WEBSOCKET}" MATCHES "true")
SET(TD_WEBSOCKET TRUE)
MESSAGE("Enable websocket")
ADD_DEFINITIONS(-DWEBSOCKET)
ELSE ()
SET(TD_WEBSOCKET FALSE)
ENDIF ()
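Since the block above matches the WEBSOCKET cache entry against "true", enabling it at configure time would look like this (a sketch; the build directory is assumed):

```bash
# Configure with websocket support; the block above then sets
# TD_WEBSOCKET=TRUE and adds -DWEBSOCKET to the compile definitions.
mkdir -p debug && cd debug
cmake .. -DWEBSOCKET=true
```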
IF ("${BUILD_HTTP}" STREQUAL "") IF ("${BUILD_HTTP}" STREQUAL "")
IF (TD_LINUX) IF (TD_LINUX)
IF (TD_ARM_32) IF (TD_ARM_32)
@@ -97,13 +105,13 @@ ELSE ()
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${GCC_COVERAGE_COMPILE_FLAGS} ${GCC_COVERAGE_LINK_FLAGS}")
ENDIF ()
-IF (${SANITIZER} MATCHES "true")
IF (${BUILD_SANITIZER})
-SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3")
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
-SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
MESSAGE(STATUS "Will compile with Address Sanitizer!")
ELSE ()
-SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3")
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=0")
-SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=0")
ENDIF ()
MESSAGE("System processor ID: ${CMAKE_SYSTEM_PROCESSOR}")
......
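A minimal sketch of driving the renamed switch from the hunk above on the command line; `-DBUILD_SANITIZER=true` replaces the old `-DSANITIZER=true` spelling and turns on the `-fsanitize` flags shown:

```bash
# Old invocation (before this change):  cmake .. -DSANITIZER=true
# New invocation (after this change):
cmake .. -DBUILD_SANITIZER=true
make
```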
@@ -13,6 +13,7 @@ ELSEIF (TD_WINDOWS)
INSTALL(FILES ${TD_SOURCE_DIR}/packaging/cfg/taos.cfg DESTINATION cfg)
INSTALL(FILES ${TD_SOURCE_DIR}/include/client/taos.h DESTINATION include)
INSTALL(FILES ${TD_SOURCE_DIR}/include/util/taoserror.h DESTINATION include)
INSTALL(FILES ${TD_SOURCE_DIR}/include/libs/function/taosudf.h DESTINATION include)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.lib DESTINATION driver)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos_static.lib DESTINATION driver)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.dll DESTINATION driver)
......
@@ -51,15 +51,44 @@ IF(${TD_WINDOWS})
"If build unit tests using googletest"
ON
)
-ELSE ()
ELSEIF (TD_DARWIN_64)
add_definitions(-DCOMPILER_SUPPORTS_CXX13)
option(
BUILD_TEST
"If build unit tests using googletest"
ON
)
ELSE ()
include(CheckCXXCompilerFlag)
CHECK_CXX_COMPILER_FLAG("-std=c++13" COMPILER_SUPPORTS_CXX13)
IF(${COMPILER_SUPPORTS_CXX13})
add_definitions(-DCOMPILER_SUPPORTS_CXX13)
option(
BUILD_TEST
"If build unit tests using googletest"
ON
)
ELSE ()
option(
BUILD_TEST
"If build unit tests using googletest"
OFF
)
ENDIF ()
ENDIF ()
option(
BUILD_SANITIZER
"If build sanitizer"
OFF
)
option(
TDENGINE_3
"TDengine 3.x"
ON
)
option(
BUILD_ADDR2LINE
"If build addr2line"
......
@@ -70,5 +70,46 @@ ELSEIF (${CMAKE_SYSTEM_NAME} MATCHES "Windows")
ENDIF()
IF ("${CPUTYPE}" STREQUAL "")
MESSAGE(STATUS "The current platform " ${CMAKE_SYSTEM_PROCESSOR} " is detected")
IF (CMAKE_SYSTEM_PROCESSOR MATCHES "(amd64)|(AMD64)")
MESSAGE(STATUS "The current platform is amd64")
SET(PLATFORM_ARCH_STR "amd64")
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "(x86)|(X86)")
MESSAGE(STATUS "The current platform is x86")
SET(PLATFORM_ARCH_STR "i386")
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "armv7l")
MESSAGE(STATUS "The current platform is aarch32")
SET(PLATFORM_ARCH_STR "arm")
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
MESSAGE(STATUS "The current platform is aarch64")
SET(PLATFORM_ARCH_STR "arm64")
ENDIF ()
ELSE ()
# if generate ARM version:
# cmake -DCPUTYPE=aarch32 .. or cmake -DCPUTYPE=aarch64
IF (${CPUTYPE} MATCHES "aarch32")
SET(PLATFORM_ARCH_STR "arm")
MESSAGE(STATUS "input cpuType: aarch32")
ELSEIF (${CPUTYPE} MATCHES "aarch64")
SET(PLATFORM_ARCH_STR "arm64")
MESSAGE(STATUS "input cpuType: aarch64")
ELSEIF (${CPUTYPE} MATCHES "mips64")
SET(PLATFORM_ARCH_STR "mips")
MESSAGE(STATUS "input cpuType: mips64")
ELSEIF (${CPUTYPE} MATCHES "x64")
SET(PLATFORM_ARCH_STR "amd64")
MESSAGE(STATUS "input cpuType: x64")
ELSEIF (${CPUTYPE} MATCHES "x86")
SET(PLATFORM_ARCH_STR "i386")
MESSAGE(STATUS "input cpuType: x86")
ELSE ()
MESSAGE(STATUS "input cpuType unknown " ${CPUTYPE})
ENDIF ()
ENDIF ()
MESSAGE(STATUS "platform arch:" ${PLATFORM_ARCH_STR})
MESSAGE("C Compiler ID: ${CMAKE_C_COMPILER_ID}") MESSAGE("C Compiler ID: ${CMAKE_C_COMPILER_ID}")
MESSAGE("CXX Compiler ID: ${CMAKE_CXX_COMPILER_ID}") MESSAGE("CXX Compiler ID: ${CMAKE_CXX_COMPILER_ID}")
...@@ -118,6 +118,7 @@ execute_process(COMMAND "${CMAKE_COMMAND}" --build . ...@@ -118,6 +118,7 @@ execute_process(COMMAND "${CMAKE_COMMAND}" --build .
# ================================================================================================ # ================================================================================================
# googletest # googletest
if(${BUILD_TEST}) if(${BUILD_TEST})
set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
add_subdirectory(googletest EXCLUDE_FROM_ALL) add_subdirectory(googletest EXCLUDE_FROM_ALL)
target_include_directories( target_include_directories(
gtest gtest
...@@ -259,7 +260,7 @@ if(${BUILD_MSVCREGEX}) ...@@ -259,7 +260,7 @@ if(${BUILD_MSVCREGEX})
SET_TARGET_PROPERTIES(msvcregex PROPERTIES OUTPUT_NAME msvcregex) SET_TARGET_PROPERTIES(msvcregex PROPERTIES OUTPUT_NAME msvcregex)
endif(${BUILD_MSVCREGEX}) endif(${BUILD_MSVCREGEX})
# msvcregex # wcwidth
if(${BUILD_WCWIDTH}) if(${BUILD_WCWIDTH})
add_library(wcwidth STATIC "") add_library(wcwidth STATIC "")
target_sources(wcwidth target_sources(wcwidth
......
...@@ -24,3 +24,4 @@ if(${BUILD_WITH_TRAFT}) ...@@ -24,3 +24,4 @@ if(${BUILD_WITH_TRAFT})
endif(${BUILD_WITH_TRAFT}) endif(${BUILD_WITH_TRAFT})
add_subdirectory(tdev) add_subdirectory(tdev)
add_subdirectory(lz4)
add_executable(lz4_test "")
target_sources(lz4_test
PRIVATE
"main.c"
)
target_link_libraries(lz4_test lz4_static)
\ No newline at end of file
#include <stdio.h>
#include "lz4.h"
int main(int argc, char const *argv[]) {
printf("%d\n", LZ4_compressBound(1024));
return 0;
}
...@@ -9,15 +9,15 @@ The data model employed by TDengine is similar to that of a relational database. ...@@ -9,15 +9,15 @@ The data model employed by TDengine is similar to that of a relational database.
The [characteristics of time-series data](https://www.taosdata.com/blog/2019/07/09/86.html) from different data collection points may be different. Characteristics include collection frequency, retention policy and others which determine how you create and configure the database. For example, days to keep, number of replicas, data block size, whether data updates are allowed and other configurable parameters would be determined by the characteristics of your data and your business requirements. For TDengine to operate with the best performance, we strongly recommend that you create and configure different databases for data with different characteristics. This allows you, for example, to set up different storage and retention policies. When creating a database, there are a lot of parameters that can be configured, such as the days to keep data, the number of replicas, the number of memory blocks, time precision, the minimum and maximum number of rows in each data block, whether compression is enabled, the time range of the data in a single data file and so on. Below is an example of the SQL statement to create a database. The [characteristics of time-series data](https://www.taosdata.com/blog/2019/07/09/86.html) from different data collection points may be different. Characteristics include collection frequency, retention policy and others which determine how you create and configure the database. For example, days to keep, number of replicas, data block size, whether data updates are allowed and other configurable parameters would be determined by the characteristics of your data and your business requirements. For TDengine to operate with the best performance, we strongly recommend that you create and configure different databases for data with different characteristics. This allows you, for example, to set up different storage and retention policies. When creating a database, there are a lot of parameters that can be configured, such as the days to keep data, the number of replicas, the number of memory blocks, time precision, the minimum and maximum number of rows in each data block, whether compression is enabled, the time range of the data in a single data file and so on. Below is an example of the SQL statement to create a database.
```sql ```sql
CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 6 UPDATE 1; CREATE DATABASE power KEEP 365 DURATION 10 BUFFER 16 VGROUPS 100 WAL 1;
``` ```
In the above SQL statement: In the above SQL statement:
- a database named "power" will be created - a database named "power" will be created
- the data in it will be kept for 365 days, which means that data older than 365 days will be deleted automatically - the data in it will be kept for 365 days, which means that data older than 365 days will be deleted automatically
- a new data file will be created every 10 days - a new data file will be created every 10 days
- the number of memory blocks is 6 - the size of memory cache for writing is 16 MB
- data is allowed to be updated - data will first be written to WAL, without FSYNC
For more details please refer to [Database](/taos-sql/database). For more details please refer to [Database](/taos-sql/database).
...@@ -30,7 +30,6 @@ USE power; ...@@ -30,7 +30,6 @@ USE power;
:::note :::note
- Any table or STable must belong to a database. To create a table or STable, the database it belongs to must be ready. - Any table or STable must belong to a database. To create a table or STable, the database it belongs to must be ready.
- JOIN operations can't be performed on tables from two different databases.
- Timestamp needs to be specified when inserting rows or querying historical rows. - Timestamp needs to be specified when inserting rows or querying historical rows.
::: :::
...@@ -52,7 +51,7 @@ Similar to creating a regular table, when creating a STable, the name and schema ...@@ -52,7 +51,7 @@ Similar to creating a regular table, when creating a STable, the name and schema
For each kind of data collection point, a corresponding STable must be created. There may be many STables in an application. For an electrical power system, we need to create STables for meters, transformers, busbars and switches respectively. There may be multiple kinds of data collection points on a single device, for example, there may be one data collection point for electrical data like current and voltage and another data collection point for environmental data like temperature, humidity and wind direction. Multiple STables are required for these kinds of devices. For each kind of data collection point, a corresponding STable must be created. There may be many STables in an application. For an electrical power system, we need to create STables for meters, transformers, busbars and switches respectively. There may be multiple kinds of data collection points on a single device, for example, there may be one data collection point for electrical data like current and voltage and another data collection point for environmental data like temperature, humidity and wind direction. Multiple STables are required for these kinds of devices.
At most 4096 (or 1024 prior to version 2.1.7.0) columns are allowed in a STable. If there are more than 4096 of metrics to be collected for a data collection point, multiple STables are required. There can be multiple databases in a system, while one or more STables can exist in a database. At most 4096 columns are allowed in a STable. If there are more than 4096 metrics to be collected for a data collection point, multiple STables are required. There can be multiple databases in a system, while one or more STables can exist in a database.
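For illustration, a minimal sketch of creating such an STable (the column list mirrors the `meters` examples in this document; the two tag columns are assumed names for illustration):

```sql
-- One STable per kind of data collection point; static attributes of each
-- point go into tag columns (location and groupId are assumed names).
CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT)
  TAGS (location BINARY(64), groupId INT);
```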
## Create Table ## Create Table
...@@ -66,12 +65,11 @@ In the above SQL statement, "d1001" is the table name, "meters" is the STable na ...@@ -66,12 +65,11 @@ In the above SQL statement, "d1001" is the table name, "meters" is the STable na
In the TDengine system, it's recommended to create a table for a data collection point via STable. A table created via STable is called subtable in some parts of the TDengine documentation. All SQL commands applied on regular tables can be applied on subtables. In the TDengine system, it's recommended to create a table for a data collection point via STable. A table created via STable is called subtable in some parts of the TDengine documentation. All SQL commands applied on regular tables can be applied on subtables.
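A hedged sketch of creating such a subtable from the `meters` STable above (the tag values are illustrative):

```sql
-- d1001 inherits its schema from the meters STable; only tag values are supplied.
CREATE TABLE d1001 USING meters TAGS ('California.SanFrancisco', 2);
```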
:::warning
It's not recommended to create a table in a database while using a STable from another database as template.
:::tip :::tip
It's suggested to use the globally unique ID of a data collection point as the table name. For example the device serial number could be used as a unique ID. If a unique ID doesn't exist, multiple IDs that are not globally unique can be combined to form a globally unique ID. It's not recommended to use a globally unique ID as tag value. It's suggested to use the globally unique ID of a data collection point as the table name. For example the device serial number could be used as a unique ID. If a unique ID doesn't exist, multiple IDs that are not globally unique can be combined to form a globally unique ID. It's not recommended to use a globally unique ID as tag value.
:::
## Create Table Automatically ## Create Table Automatically
In some circumstances, it's unknown whether the table already exists when inserting rows. The table can be created automatically using the SQL statement below, and nothing will happen if the table already exists. In some circumstances, it's unknown whether the table already exists when inserting rows. The table can be created automatically using the SQL statement below, and nothing will happen if the table already exists.
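As a sketch of this pattern (assuming the `meters` and `d1001` names used above; the statement elided below may differ in detail):

```sql
-- If d1001 does not exist, it is first created from the meters STable using
-- the given tag values; the row is then inserted either way.
INSERT INTO d1001 USING meters TAGS ('California.SanFrancisco', 2)
  VALUES (NOW, 10.2, 219, 0.32);
```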
......
...@@ -42,7 +42,7 @@ INSERT INTO d1001 VALUES (1538548684000, 10.2, 220, 0.23) (1538548696650, 10.3, ...@@ -42,7 +42,7 @@ INSERT INTO d1001 VALUES (1538548684000, 10.2, 220, 0.23) (1538548696650, 10.3,
### Insert into Multiple Tables ### Insert into Multiple Tables
Data can be inserted into multiple tables in the same SQL statement. The example below inserts 2 rows into table "d1001" and 1 row into table "d1002". Data can be inserted into multiple tables in a single SQL statement. The example below inserts 2 rows into table "d1001" and 1 row into table "d1002".
```sql ```sql
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) d1002 VALUES (1538548696800, 12.3, 221, 0.31); INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) d1002 VALUES (1538548696800, 12.3, 221, 0.31);
...@@ -52,15 +52,15 @@ For more details about `INSERT` please refer to [INSERT](/taos-sql/insert). ...@@ -52,15 +52,15 @@ For more details about `INSERT` please refer to [INSERT](/taos-sql/insert).
:::info :::info
- Inserting in batches can improve performance. Normally, the higher the batch size, the better the performance. Please note that a single row can't exceed 48K bytes and each SQL statement can't exceed 1MB. - Inserting in batches can improve performance. Normally, the higher the batch size, the better the performance. Please note that a single row can't exceed 48 KB and each SQL statement can't exceed 1 MB.
- Inserting with multiple threads can also improve performance. However, depending on the system resources on the application side and the server side, when the number of inserting threads grows beyond a specific point the performance may drop instead of improving. The proper number of threads needs to be tested in a specific environment to find the best number. - Inserting with multiple threads can also improve performance. However, depending on the system resources on the application side and the server side, when the number of inserting threads grows beyond a specific point the performance may drop instead of improving. The proper number of threads needs to be tested in a specific environment to find the best number; it may be affected by system resources on the server side, system resources on the client side, the table schemas, and so on.
::: :::
:::warning :::warning
- If the timestamp for the row to be inserted already exists in the table, the behavior depends on the value of parameter `UPDATE`. If it's set to 0 (the default value), the row will be discarded. If it's set to 1, the new values will override the old values for the same row. - If the timestamp for the row to be inserted already exists in the table, the old data will be overwritten by the new values for the columns for which new values are provided; columns for which no new values are provided are not affected.
- The timestamp to be inserted must be newer than the timestamp of subtracting current time by the parameter `KEEP`. If `KEEP` is set to 3650 days, then the data older than 3650 days ago can't be inserted. The timestamp to be inserted can't be newer than the timestamp of current time plus parameter `DAYS`. If `DAYS` is set to 2, the data newer than 2 days later can't be inserted. - The timestamp to be inserted must be newer than the current time minus the parameter `KEEP`. If `KEEP` is set to 3650 days, then data older than 3650 days can't be inserted. The timestamp to be inserted can't be newer than the current time plus the parameter `DURATION`. If `DURATION` is set to 2, data more than 2 days in the future can't be inserted (see the sketch after this note).
::: :::
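A worked sketch of the resulting writable time window (the parameter values and dates are illustrative assumptions):

```sql
-- Assume KEEP 3650 and DURATION 2, with the current time 2022-06-01 00:00:00.
-- Only timestamps in roughly (now - 3650 days, now + 2 days) are accepted.
INSERT INTO d1001 VALUES ('2010-01-01 00:00:00.000', 10.2, 220, 0.23); -- rejected: older than now - KEEP
INSERT INTO d1001 VALUES ('2022-06-02 00:00:00.000', 10.2, 220, 0.23); -- accepted: inside the window
```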
...@@ -101,7 +101,7 @@ For more details about `INSERT` please refer to [INSERT](/taos-sql/insert). ...@@ -101,7 +101,7 @@ For more details about `INSERT` please refer to [INSERT](/taos-sql/insert).
### Insert with Parameter Binding ### Insert with Parameter Binding
TDengine also provides API support for parameter binding. Similar to MySQL, only `?` can be used in these APIs to represent the parameters to bind. From version 2.1.1.0 and 2.1.2.0, parameter binding support for inserting data has improved significantly to improve the insert performance by avoiding the cost of parsing SQL statements. TDengine also provides API support for parameter binding. Similar to MySQL, only `?` can be used in these APIs to represent the parameters to bind. Parameter binding improves insert performance significantly by avoiding the cost of parsing SQL statements.
Parameter binding is available only with native connection. Parameter binding is available only with native connection.
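For illustration, the SQL template used with parameter binding carries only `?` placeholders; a minimal sketch:

```sql
-- Prepared once, then bound and executed repeatedly with different
-- (timestamp, current, voltage, phase) values through the native connection.
INSERT INTO d1001 VALUES (?, ?, ?, ?)
```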
......
...@@ -54,14 +54,14 @@ Database changed. ...@@ -54,14 +54,14 @@ Database changed.
taos> show vgroups; taos> show vgroups;
vgId | tables | status | onlines | v1_dnode | v1_status | compacting | vgId | tables | status | onlines | v1_dnode | v1_status | compacting |
========================================================================================== ==========================================================================================
14 | 38000 | ready | 1 | 1 | master | 0 | 14 | 38000 | ready | 1 | 1 | leader | 0 |
15 | 38000 | ready | 1 | 1 | master | 0 | 15 | 38000 | ready | 1 | 1 | leader | 0 |
16 | 38000 | ready | 1 | 1 | master | 0 | 16 | 38000 | ready | 1 | 1 | leader | 0 |
17 | 38000 | ready | 1 | 1 | master | 0 | 17 | 38000 | ready | 1 | 1 | leader | 0 |
18 | 37001 | ready | 1 | 1 | master | 0 | 18 | 37001 | ready | 1 | 1 | leader | 0 |
19 | 37000 | ready | 1 | 1 | master | 0 | 19 | 37000 | ready | 1 | 1 | leader | 0 |
20 | 37000 | ready | 1 | 1 | master | 0 | 20 | 37000 | ready | 1 | 1 | leader | 0 |
21 | 37000 | ready | 1 | 1 | master | 0 | 21 | 37000 | ready | 1 | 1 | leader | 0 |
Query OK, 8 row(s) in set (0.001154s) Query OK, 8 row(s) in set (0.001154s)
``` ```
...@@ -161,14 +161,14 @@ First `show vgroups` is executed to show the vgroup distribution. ...@@ -161,14 +161,14 @@ First `show vgroups` is executed to show the vgroup distribution.
taos> show vgroups; taos> show vgroups;
vgId | tables | status | onlines | v1_dnode | v1_status | compacting | vgId | tables | status | onlines | v1_dnode | v1_status | compacting |
========================================================================================== ==========================================================================================
14 | 38000 | ready | 1 | 3 | master | 0 | 14 | 38000 | ready | 1 | 3 | leader | 0 |
15 | 38000 | ready | 1 | 3 | master | 0 | 15 | 38000 | ready | 1 | 3 | leader | 0 |
16 | 38000 | ready | 1 | 3 | master | 0 | 16 | 38000 | ready | 1 | 3 | leader | 0 |
17 | 38000 | ready | 1 | 3 | master | 0 | 17 | 38000 | ready | 1 | 3 | leader | 0 |
18 | 37001 | ready | 1 | 3 | master | 0 | 18 | 37001 | ready | 1 | 3 | leader | 0 |
19 | 37000 | ready | 1 | 1 | master | 0 | 19 | 37000 | ready | 1 | 1 | leader | 0 |
20 | 37000 | ready | 1 | 1 | master | 0 | 20 | 37000 | ready | 1 | 1 | leader | 0 |
21 | 37000 | ready | 1 | 1 | master | 0 | 21 | 37000 | ready | 1 | 1 | leader | 0 |
Query OK, 8 row(s) in set (0.001314s) Query OK, 8 row(s) in set (0.001314s)
``` ```
...@@ -191,14 +191,14 @@ Query OK, 0 row(s) in set (0.000575s) ...@@ -191,14 +191,14 @@ Query OK, 0 row(s) in set (0.000575s)
taos> show vgroups; taos> show vgroups;
vgId | tables | status | onlines | v1_dnode | v1_status | v2_dnode | v2_status | compacting | vgId | tables | status | onlines | v1_dnode | v1_status | v2_dnode | v2_status | compacting |
================================================================================================================= =================================================================================================================
14 | 38000 | ready | 1 | 3 | master | 0 | NULL | 0 | 14 | 38000 | ready | 1 | 3 | leader | 0 | NULL | 0 |
15 | 38000 | ready | 1 | 3 | master | 0 | NULL | 0 | 15 | 38000 | ready | 1 | 3 | leader | 0 | NULL | 0 |
16 | 38000 | ready | 1 | 3 | master | 0 | NULL | 0 | 16 | 38000 | ready | 1 | 3 | leader | 0 | NULL | 0 |
17 | 38000 | ready | 1 | 3 | master | 0 | NULL | 0 | 17 | 38000 | ready | 1 | 3 | leader | 0 | NULL | 0 |
18 | 37001 | ready | 2 | 1 | slave | 3 | master | 0 | 18 | 37001 | ready | 2 | 1 | follower | 3 | leader | 0 |
19 | 37000 | ready | 1 | 1 | master | 0 | NULL | 0 | 19 | 37000 | ready | 1 | 1 | leader | 0 | NULL | 0 |
20 | 37000 | ready | 1 | 1 | master | 0 | NULL | 0 | 20 | 37000 | ready | 1 | 1 | leader | 0 | NULL | 0 |
21 | 37000 | ready | 1 | 1 | master | 0 | NULL | 0 | 21 | 37000 | ready | 1 | 1 | leader | 0 | NULL | 0 |
Query OK, 8 row(s) in set (0.001242s) Query OK, 8 row(s) in set (0.001242s)
``` ```
...@@ -207,7 +207,7 @@ It can be seen from above output that vgId 18 has been moved from dnode 3 to dno ...@@ -207,7 +207,7 @@ It can be seen from above output that vgId 18 has been moved from dnode 3 to dno
:::note :::note
- Manual load balancing can only be performed when the automatic load balancing is disabled, i.e. `balance` is set to 0. - Manual load balancing can only be performed when the automatic load balancing is disabled, i.e. `balance` is set to 0.
- Only a vnode in normal state, i.e. master or slave, can be moved. vnode can't be moved when its in status offline, unsynced or syncing. - Only a vnode in normal state, i.e. leader or follower, can be moved. A vnode can't be moved when it's in offline, unsynced or syncing status.
- Before moving a vnode, it's necessary to make sure the target dnode has enough resources: CPU, memory and disk. - Before moving a vnode, it's necessary to make sure the target dnode has enough resources: CPU, memory and disk.
::: :::
...@@ -27,7 +27,7 @@ There may be multiple dnodes in a cluster, but only one mnode can be started in ...@@ -27,7 +27,7 @@ There may be multiple dnodes in a cluster, but only one mnode can be started in
SHOW MNODES; SHOW MNODES;
``` ```
The end point and role/status (master, slave, unsynced, or offline) of all mnodes can be shown by the above command. When the first dnode is started in a cluster, there must be one mnode in this dnode. Without at least one mnode, the cluster cannot work. If `numOfMNodes` is configured to 2, another mnode will be started when the second dnode is launched. The end point and role/status (leader, follower, unsynced, or offline) of all mnodes can be shown by the above command. When the first dnode is started in a cluster, there must be one mnode in this dnode. Without at least one mnode, the cluster cannot work. If `numOfMNodes` is configured to 2, another mnode will be started when the second dnode is launched.
For the high availability of mnode, `numOfMnodes` needs to be configured to 2 or a higher value. Because the data consistency between mnodes must be guaranteed, the replica confirmation parameter `quorum` is set to 2 automatically if `numOfMNodes` is set to 2 or higher. For the high availability of mnode, `numOfMnodes` needs to be configured to 2 or a higher value. Because the data consistency between mnodes must be guaranteed, the replica confirmation parameter `quorum` is set to 2 automatically if `numOfMNodes` is set to 2 or higher.
...@@ -58,13 +58,13 @@ When a dnode is offline, it can be detected by the TDengine cluster. There are t ...@@ -58,13 +58,13 @@ When a dnode is offline, it can be detected by the TDengine cluster. There are t
- If the dnode has been offline over the threshold configured in `offlineThreshold` in `taos.cfg`, the dnode will be removed from the cluster automatically. A system alert will be generated and automatic load balancing will be triggered if `balance` is set to 1. When the removed dnode is restarted and becomes online, it will not join the cluster automatically. The system administrator has to manually join the dnode to the cluster. - If the dnode has been offline over the threshold configured in `offlineThreshold` in `taos.cfg`, the dnode will be removed from the cluster automatically. A system alert will be generated and automatic load balancing will be triggered if `balance` is set to 1. When the removed dnode is restarted and becomes online, it will not join the cluster automatically. The system administrator has to manually join the dnode to the cluster.
:::note :::note
If all the vnodes in a vgroup (or mnodes in mnode group) are in offline or unsynced status, the master node can only be voted on, after all the vnodes or mnodes in the group become online and can exchange status. Following this, the vgroup (or mnode group) is able to provide service. If all the vnodes in a vgroup (or mnodes in mnode group) are in offline or unsynced status, a leader can only be elected after all the vnodes or mnodes in the group become online and can exchange status. Following this, the vgroup (or mnode group) is able to provide service.
::: :::
## Arbitrator ## Arbitrator
The "arbitrator" component is used to address the special case when the number of replicas is set to an even number like 2,4 etc. If half of the vnodes in a vgroup don't work, it is impossible to vote and select a master node. This situation also applies to mnodes if the number of mnodes is set to an even number like 2,4 etc. The "arbitrator" component is used to address the special case when the number of replicas is set to an even number like 2,4 etc. If half of the vnodes in a vgroup don't work, it is impossible to vote and select a leader node. This situation also applies to mnodes if the number of mnodes is set to an even number like 2,4 etc.
To resolve this problem, a new arbitrator component named `tarbitrator`, an abbreviation of TDengine Arbitrator, was introduced. The `tarbitrator` simulates a vnode or mnode but it's only responsible for network communication and doesn't handle any actual data access. As long as more than half of the vnodes or mnodes, including the arbitrator, are available, the vnode group or mnode group can provide data insertion or query services normally. To resolve this problem, a new arbitrator component named `tarbitrator`, an abbreviation of TDengine Arbitrator, was introduced. The `tarbitrator` simulates a vnode or mnode but it's only responsible for network communication and doesn't handle any actual data access. As long as more than half of the vnodes or mnodes, including the arbitrator, are available, the vnode group or mnode group can provide data insertion or query services normally.
......
...@@ -32,7 +32,6 @@ CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1]; ...@@ -32,7 +32,6 @@ CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1];
- cacheLast: [Description](/reference/config/#cachelast) - cacheLast: [Description](/reference/config/#cachelast)
- replica: [Description](/reference/config/#replica) - replica: [Description](/reference/config/#replica)
- quorum: [Description](/reference/config/#quorum) - quorum: [Description](/reference/config/#quorum)
- maxVgroupsPerDb: [Description](/reference/config/#maxvgroupsperdb)
- comp: [Description](/reference/config/#comp) - comp: [Description](/reference/config/#comp)
- precision: [Description](/reference/config/#precision) - precision: [Description](/reference/config/#precision)
6. Please note that all of the parameters mentioned in this section are configured in configuration file `taos.cfg` on the TDengine server. If not specified in the `create database` statement, the values from taos.cfg are used by default. To override default parameters, they must be specified in the `create database` statement. 6. Please note that all of the parameters mentioned in this section are configured in configuration file `taos.cfg` on the TDengine server. If not specified in the `create database` statement, the values from taos.cfg are used by default. To override default parameters, they must be specified in the `create database` statement.
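A hedged sketch of overriding a few of these defaults at creation time (the parameter values are illustrative):

```sql
-- Values given here override the corresponding taos.cfg defaults
-- for this database only; omitted parameters keep their defaults.
CREATE DATABASE IF NOT EXISTS power KEEP 365 COMP 2 PRECISION 'ms' REPLICA 1;
```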
......
...@@ -21,7 +21,7 @@ The following example is in an Ubuntu environment and uses the `curl` tool to ve ...@@ -21,7 +21,7 @@ The following example is in an Ubuntu environment and uses the `curl` tool to ve
The following example lists all databases on the host h1.taosdata.com. To use it in your environment, replace `h1.taosdata.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number. The following example lists all databases on the host h1.taosdata.com. To use it in your environment, replace `h1.taosdata.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number.
```html ```html
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' h1.taosdata.com:6041/rest/sql curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" h1.taosdata.com:6041/rest/sql
``` ```
The following return value results indicate that the verification passed. The following return value results indicate that the verification passed.
...@@ -106,13 +106,13 @@ The HTTP request's BODY is a complete SQL command, and the data table in the SQL ...@@ -106,13 +106,13 @@ The HTTP request's BODY is a complete SQL command, and the data table in the SQL
Use `curl` to initiate an HTTP request with a custom authentication method, with the following syntax. Use `curl` to initiate an HTTP request with a custom authentication method, with the following syntax.
```bash ```bash
curl -H 'Authorization: Basic <TOKEN>' -d '<SQL>' <ip>:<PORT>/rest/sql/[db_name] curl -L -H "Authorization: Basic <TOKEN>" -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name]
``` ```
Or Or
```bash ```bash
curl -u username:password -d '<SQL>' <ip>:<PORT>/rest/sql/[db_name] curl -L -u username:password -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name]
``` ```
where `TOKEN` is the string after Base64 encoding of `{username}:{password}`, e.g. `root:taosdata` is encoded as `cm9vdDp0YW9zZGF0YQ==`. where `TOKEN` is the string after Base64 encoding of `{username}:{password}`, e.g. `root:taosdata` is encoded as `cm9vdDp0YW9zZGF0YQ==`.
...@@ -192,7 +192,7 @@ Response body: ...@@ -192,7 +192,7 @@ Response body:
- query all records from table d1001 of database demo - query all records from table d1001 of database demo
```bash ```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001' 192.168.0.1:6041/rest/sql curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sql
``` ```
Response body: Response body:
...@@ -218,7 +218,7 @@ Response body: ...@@ -218,7 +218,7 @@ Response body:
- Create database demo: - Create database demo:
```bash ```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 192.168.0.1:6041/rest/sql curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "create database demo" 192.168.0.1:6041/rest/sql
``` ```
Response body: Response body:
...@@ -240,7 +240,7 @@ Response body: ...@@ -240,7 +240,7 @@ Response body:
When the HTTP request URL uses `/rest/sqlt`, the returned result set's timestamp value will be in Unix timestamp format, for example: When the HTTP request URL uses `/rest/sqlt`, the returned result set's timestamp value will be in Unix timestamp format, for example:
```bash ```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001' 192.168.0.1:6041/rest/sqlt curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sqlt
``` ```
Response body: Response body:
...@@ -268,7 +268,7 @@ Response body: ...@@ -268,7 +268,7 @@ Response body:
When the HTTP request URL uses `/rest/sqlutc`, the timestamp of the returned result set will be expressed in UTC format, for example: When the HTTP request URL uses `/rest/sqlutc`, the timestamp of the returned result set will be expressed in UTC format, for example:
```bash ```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.t1' 192.168.0.1:6041/rest/sqlutc curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.t1" 192.168.0.1:6041/rest/sqlutc
``` ```
Response body: Response body:
......
...@@ -206,8 +206,8 @@ Note: InfluxDB token authorization is not supported at present. Only Basic autho ...@@ -206,8 +206,8 @@ Note: InfluxDB token authorization is not supported at present. Only Basic autho
You can use any client that supports the http protocol to access the RESTful interface address `http://<fqdn>:6041/<APIEndPoint>` to write data in OpenTSDB compatible format to TDengine. You can use any client that supports the http protocol to access the RESTful interface address `http://<fqdn>:6041/<APIEndPoint>` to write data in OpenTSDB compatible format to TDengine.
```text ```text
/opentsdb/v1/put/json/:db /opentsdb/v1/put/json/<db>
/opentsdb/v1/put/telnet/:db /opentsdb/v1/put/telnet/<db>
``` ```
### collectd ### collectd
......
...@@ -211,7 +211,7 @@ ...@@ -211,7 +211,7 @@
], ],
"timeFrom": null, "timeFrom": null,
"timeShift": null, "timeShift": null,
"title": "Master MNode", "title": "Leader MNode",
"transformations": [ "transformations": [
{ {
"id": "filterByValue", "id": "filterByValue",
...@@ -221,7 +221,7 @@ ...@@ -221,7 +221,7 @@
"config": { "config": {
"id": "regex", "id": "regex",
"options": { "options": {
"value": "master" "value": "leader"
} }
}, },
"fieldName": "role" "fieldName": "role"
...@@ -300,7 +300,7 @@ ...@@ -300,7 +300,7 @@
], ],
"timeFrom": null, "timeFrom": null,
"timeShift": null, "timeShift": null,
"title": "Master MNode Create Time", "title": "Leader MNode Create Time",
"transformations": [ "transformations": [
{ {
"id": "filterByValue", "id": "filterByValue",
...@@ -310,7 +310,7 @@ ...@@ -310,7 +310,7 @@
"config": { "config": {
"id": "regex", "id": "regex",
"options": { "options": {
"value": "master" "value": "leader"
} }
}, },
"fieldName": "role" "fieldName": "role"
......
...@@ -153,7 +153,7 @@ ...@@ -153,7 +153,7 @@
], ],
"timeFrom": null, "timeFrom": null,
"timeShift": null, "timeShift": null,
"title": "Master MNode", "title": "Leader MNode",
"transformations": [ "transformations": [
{ {
"id": "filterByValue", "id": "filterByValue",
...@@ -163,7 +163,7 @@ ...@@ -163,7 +163,7 @@
"config": { "config": {
"id": "regex", "id": "regex",
"options": { "options": {
"value": "master" "value": "leader"
} }
}, },
"fieldName": "role" "fieldName": "role"
...@@ -246,7 +246,7 @@ ...@@ -246,7 +246,7 @@
], ],
"timeFrom": null, "timeFrom": null,
"timeShift": null, "timeShift": null,
"title": "Master MNode Create Time", "title": "Leader MNode Create Time",
"transformations": [ "transformations": [
{ {
"id": "filterByValue", "id": "filterByValue",
...@@ -256,7 +256,7 @@ ...@@ -256,7 +256,7 @@
"config": { "config": {
"id": "regex", "id": "regex",
"options": { "options": {
"value": "master" "value": "leader"
} }
}, },
"fieldName": "role" "fieldName": "role"
......
...@@ -274,8 +274,8 @@ Details of the metrics are as follows. ...@@ -274,8 +274,8 @@ Details of the metrics are as follows.
This section contains the current information and status of the cluster, the alert information is also here (from left to right, top to bottom). This section contains the current information and status of the cluster, the alert information is also here (from left to right, top to bottom).
- **First EP**: the `firstEp` setting in the current TDengine cluster. - **First EP**: the `firstEp` setting in the current TDengine cluster.
- **Version**: TDengine server version (master mnode). - **Version**: TDengine server version (leader mnode).
- **Master Uptime**: The time elapsed since the current Master MNode was elected as Master. - **Leader Uptime**: The time elapsed since the current Leader MNode was elected as Leader.
- **Expire Time** - Enterprise version expiration time. - **Expire Time** - Enterprise version expiration time.
- **Used Measuring Points** - The number of measuring points used by the Enterprise Edition. - **Used Measuring Points** - The number of measuring points used by the Enterprise Edition.
- **Databases** - The number of databases. - **Databases** - The number of databases.
...@@ -333,7 +333,7 @@ Data node resource usage display with repeated multiple rows for the variable `$ ...@@ -333,7 +333,7 @@ Data node resource usage display with repeated multiple rows for the variable `$
2. **Has MNodes?**: whether the current dnode is a mnode. 2. **Has MNodes?**: whether the current dnode is a mnode.
3. **CPU Cores**: the number of CPU cores. 3. **CPU Cores**: the number of CPU cores.
4. **VNodes Number**: the number of VNodes in the current dnode. 4. **VNodes Number**: the number of VNodes in the current dnode.
5. **VNodes Masters**: the number of vnodes in the master role. 5. **VNodes Masters**: the number of vnodes in the leader role.
6. **Current CPU Usage of taosd**: CPU usage rate of taosd processes. 6. **Current CPU Usage of taosd**: CPU usage rate of taosd processes.
7. **Current Memory Usage of taosd**: memory usage of taosd processes. 7. **Current Memory Usage of taosd**: memory usage of taosd processes.
8. **Disk Used**: The total disk usage percentage of the taosd data directory. 8. **Disk Used**: The total disk usage percentage of the taosd data directory.
......
...@@ -26,7 +26,6 @@ All executable files of TDengine are in the _/usr/local/taos/bin_ directory by d ...@@ -26,7 +26,6 @@ All executable files of TDengine are in the _/usr/local/taos/bin_ directory by d
- _remove.sh_: script to uninstall TDengine, please execute it carefully, link to the **rmtaos** command in the /usr/bin directory. Will remove the TDengine installation directory `/usr/local/taos`, but will keep `/etc/taos`, `/var/lib/taos`, `/var/log/taos` - _remove.sh_: script to uninstall TDengine, please execute it carefully, link to the **rmtaos** command in the /usr/bin directory. Will remove the TDengine installation directory `/usr/local/taos`, but will keep `/etc/taos`, `/var/lib/taos`, `/var/log/taos`
- _taosadapter_: server-side executable that provides RESTful services and accepts writing requests from a variety of other software - _taosadapter_: server-side executable that provides RESTful services and accepts writing requests from a variety of other software
- _tarbitrator_: provides arbitration for two-node cluster deployments - _tarbitrator_: provides arbitration for two-node cluster deployments
- _run_taosd_and_taosadapter.sh_: script to start both taosd and taosAdapter
- _TDinsight.sh_: script to download TDinsight and install it - _TDinsight.sh_: script to download TDinsight and install it
- _set_core.sh_: script for setting up the system to generate core dump files for easy debugging - _set_core.sh_: script for setting up the system to generate core dump files for easy debugging
- _taosd-dump-cfg.gdb_: script to facilitate debugging of taosd's gdb execution. - _taosd-dump-cfg.gdb_: script to facilitate debugging of taosd's gdb execution.
......
...@@ -22,9 +22,9 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc ...@@ -22,9 +22,9 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc
**Virtual node (vnode)**: To better support data sharding, load balancing and prevent data from overheating or skewing, data nodes are virtualized into multiple virtual nodes (vnode, V2, V3, V4, etc. in the figure). Each vnode is a relatively independent work unit, which is the basic unit of time-series data storage and has independent running threads, memory space and persistent storage path. A vnode contains a certain number of tables (data collection points). When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a data node depends on the capacity of the hardware of the physical node where the data node is located. A vnode belongs to only one DB, but a DB can have multiple vnodes. In addition to the stored time-series data, a vnode also stores the schema and tag values of the included tables. A virtual node is uniquely identified in the system by the EP of the data node and the VGroup ID to which it belongs and is created and managed by the management node. **Virtual node (vnode)**: To better support data sharding, load balancing and prevent data from overheating or skewing, data nodes are virtualized into multiple virtual nodes (vnode, V2, V3, V4, etc. in the figure). Each vnode is a relatively independent work unit, which is the basic unit of time-series data storage and has independent running threads, memory space and persistent storage path. A vnode contains a certain number of tables (data collection points). When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a data node depends on the capacity of the hardware of the physical node where the data node is located. A vnode belongs to only one DB, but a DB can have multiple vnodes. In addition to the stored time-series data, a vnode also stores the schema and tag values of the included tables. A virtual node is uniquely identified in the system by the EP of the data node and the VGroup ID to which it belongs and is created and managed by the management node.
**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in the figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 5) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The master/slave mechanism is adopted for the mnode group and the data synchronization is carried out in a strongly consistent way. Any data update operation can only be executed on the master. The creation of mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located, through internal messaging interaction. **Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in the figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 3) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The leader/follower mechanism is adopted for the mnode group and the data synchronization is carried out in a strongly consistent way. Any data update operation can only be executed on the leader. The creation of mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located, through internal messaging interaction.
**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed in a master/slave mechanism. Write operations can only be performed on the master vnode, and then replicated to slave vnodes, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `“replica”` when creating a DB, and the default is 1. Using the multi-replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. Virtual node groups are created and managed by the management node, and the management node assigns a system unique ID, aka VGroup ID. If two virtual nodes have the same vnode group ID, it means that they belong to the same group and the data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed, allowing only one, that is, no data replication. VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused. **Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed in a leader/follower mechanism. Write operations can only be performed on the leader vnode, and then replicated to follower vnodes, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `“replica”` when creating a DB, and the default is 1. Using the multi-replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. Virtual node groups are created and managed by the management node, and the management node assigns a system unique ID, aka VGroup ID. If two virtual nodes have the same vnode group ID, it means that they belong to the same group and the data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed, allowing only one, that is, no data replication. VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused.
**TAOSC**: TAOSC is the driver provided by TDengine to applications. It is responsible for dealing with the interaction between application and cluster, and provides the native interface for the C/C++ language. It is also embedded in the JDBC, C#, Python, Go, Node.js language connection libraries. Applications interact with the whole cluster through TAOSC instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; when returning the results to the application, TAOSC also needs to be responsible for the final level of aggregation, sorting, filtering and other operations. For JDBC, C/C++/C#/Python/Go/Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, TAOSC has a running instance on each dnode of TDengine cluster. **TAOSC**: TAOSC is the driver provided by TDengine to applications. It is responsible for dealing with the interaction between application and cluster, and provides the native interface for the C/C++ language. It is also embedded in the JDBC, C#, Python, Go, Node.js language connection libraries. Applications interact with the whole cluster through TAOSC instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; when returning the results to the application, TAOSC also needs to be responsible for the final level of aggregation, sorting, filtering and other operations. For JDBC, C/C++/C#/Python/Go/Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, TAOSC has a running instance on each dnode of TDengine cluster.
...@@ -62,13 +62,13 @@ To explain the relationship between vnode, mnode, TAOSC and application and thei ...@@ -62,13 +62,13 @@ To explain the relationship between vnode, mnode, TAOSC and application and thei
1. Application initiates a request to insert data through JDBC, ODBC, or other APIs. 1. Application initiates a request to insert data through JDBC, ODBC, or other APIs.
2. TAOSC checks the cache to see if meta data exists for the table. If it does, it goes straight to Step 4. If not, TAOSC sends a get meta-data request to mnode. 2. TAOSC checks the cache to see if meta data exists for the table. If it does, it goes straight to Step 4. If not, TAOSC sends a get meta-data request to mnode.
3. Mnode returns the meta-data of the table to TAOSC. Meta-data contains the schema of the table, and also the vgroup information to which the table belongs (the vnode ID and the End Point of the dnode where the table belongs. If the number of replicas is N, there will be N groups of End Points). If TAOSC does not receive a response from the mnode for a long time, and there are multiple mnodes, TAOSC will send a request to the next mnode. 3. Mnode returns the meta-data of the table to TAOSC. Meta-data contains the schema of the table, and also the vgroup information to which the table belongs (the vnode ID and the End Point of the dnode where the table belongs. If the number of replicas is N, there will be N groups of End Points). If TAOSC does not receive a response from the mnode for a long time, and there are multiple mnodes, TAOSC will send a request to the next mnode.
4. TAOSC initiates an insert request to master vnode. 4. TAOSC initiates an insert request to leader vnode.
5. After vnode inserts the data, it gives a reply to TAOSC, indicating that the insertion is successful. If TAOSC doesn't get a response from vnode for a long time, TAOSC will treat this node as offline. In this case, if there are multiple replicas of the inserted database, TAOSC will issue an insert request to the next vnode in vgroup. 5. After vnode inserts the data, it gives a reply to TAOSC, indicating that the insertion is successful. If TAOSC doesn't get a response from vnode for a long time, TAOSC will treat this node as offline. In this case, if there are multiple replicas of the inserted database, TAOSC will issue an insert request to the next vnode in vgroup.
6. TAOSC notifies APP that writing is successful. 6. TAOSC notifies APP that writing is successful.
For Step 2 and 3, when TAOSC starts, it does not know the End Point of mnode, so it will directly initiate a request to the configured serving End Point of the cluster. If the dnode that receives the request does not have a mnode configured, it will reply with the mnode EP list, so that TAOSC will re-issue a request to obtain meta-data to the EP of another mnode. For Step 2 and 3, when TAOSC starts, it does not know the End Point of mnode, so it will directly initiate a request to the configured serving End Point of the cluster. If the dnode that receives the request does not have a mnode configured, it will reply with the mnode EP list, so that TAOSC will re-issue a request to obtain meta-data to the EP of another mnode.
For Step 4 and 5, without caching, TAOSC can't recognize the master in the virtual node group, so assumes that the first vnode is the master and sends a request to it. If this vnode is not the master, it will reply to the actual master as a new target to which TAOSC shall send a request. Once a response of successful insertion is obtained, TAOSC will cache the information of master node. For Step 4 and 5, without caching, TAOSC can't recognize the leader in the virtual node group, so it assumes that the first vnode is the leader and sends a request to it. If this vnode is not the leader, it will reply with the actual leader as a new target to which TAOSC shall send a request. Once a response of successful insertion is obtained, TAOSC will cache the information of the leader node.
The above describes the process of inserting data. The processes of querying and computing are the same. TAOSC encapsulates and hides all these complicated processes, and it is transparent to applications. The above describes the process of inserting data. The processes of querying and computing are the same. TAOSC encapsulates and hides all these complicated processes, and it is transparent to applications.
...@@ -119,65 +119,65 @@ The load balancing process does not require any manual intervention, and it is t ...@@ -119,65 +119,65 @@ The load balancing process does not require any manual intervention, and it is t
## Data Writing and Replication Process ## Data Writing and Replication Process
If a database has N replicas, a virtual node group has N virtual nodes. But only one is the Master and all others are slaves. When the application writes a new record to system, only the Master vnode can accept the writing request. If a slave vnode receives a writing request, the system will notifies TAOSC to redirect. If a database has N replicas, a virtual node group has N virtual nodes. But only one is the Leader and all others are followers. When the application writes a new record to the system, only the Leader vnode can accept the write request. If a follower vnode receives a write request, the system notifies TAOSC to redirect.
### Master vnode Writing Process ### Leader vnode Writing Process
Master Vnode uses a writing process as follows: Leader Vnode uses a writing process as follows:
![TDengine Database Master Writing Process](write_master.webp) ![TDengine Database Leader Writing Process](write_master.webp)
<center> Figure 3: TDengine Master writing process </center> <center> Figure 3: TDengine Leader writing process </center>
1. Master vnode receives the application data insertion request, verifies, and moves to next step; 1. Leader vnode receives the application data insertion request, verifies it, and moves to the next step;
2. If the system configuration parameter `“walLevel”` is greater than 0, vnode will write the original request packet into the database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine writes WAL data to disk immediately to ensure that even if the system goes down, all data can be recovered from the database log file; 2. If the system configuration parameter `“walLevel”` is greater than 0, vnode will write the original request packet into the database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine writes WAL data to disk immediately to ensure that even if the system goes down, all data can be recovered from the database log file;
3. If there are multiple replicas, vnode will forward data packet to slave vnodes in the same virtual node group, and the forwarded packet has a version number with data; 3. If there are multiple replicas, vnode will forward data packet to follower vnodes in the same virtual node group, and the forwarded packet has a version number with data;
4. Write into memory and add the record to “skip list”; 4. Write into memory and add the record to “skip list”;
5. Master vnode returns a confirmation message to the application, indicating a successful write. 5. Leader vnode returns a confirmation message to the application, indicating a successful write.
6. If any of Step 2, 3 or 4 fails, the error will directly return to the application. 6. If any of Step 2, 3 or 4 fails, the error will directly return to the application.
### Slave vnode Writing Process ### Follower vnode Writing Process
For a slave vnode, the write process as follows: For a follower vnode, the write process is as follows:
![TDengine Database Slave Writing Process](write_slave.webp) ![TDengine Database Follower Writing Process](write_slave.webp)
<center> Figure 4: TDengine Slave Writing Process </center> <center> Figure 4: TDengine Follower Writing Process </center>
1. Slave vnode receives a data insertion request forwarded by Master vnode; 1. Follower vnode receives a data insertion request forwarded by Leader vnode;
2. If the system configuration parameter `“walLevel”` is greater than 0, vnode will write the original request packet into the database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine writes WAL data to disk immediately to ensure that even if the system goes down, all data can be recovered from the database log file; 2. If the system configuration parameter `“walLevel”` is greater than 0, vnode will write the original request packet into the database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine writes WAL data to disk immediately to ensure that even if the system goes down, all data can be recovered from the database log file;
3. Write into memory and add the record to “skip list”. 3. Write into memory and add the record to “skip list”.
Compared with Master vnode, slave vnode has no forwarding or reply confirmation step, means two steps less. But writing into memory and WAL is exactly the same. Compared with the leader vnode, a follower vnode has no forwarding or reply confirmation step, which means two fewer steps. But writing into memory and WAL is exactly the same.
### Remote Disaster Recovery and IDC (Internet Data Center) Migration ### Remote Disaster Recovery and IDC (Internet Data Center) Migration
As discussed above, TDengine writes using Master and Slave processes. TDengine adopts asynchronous replication for data synchronization. This method can greatly improve write performance, with no obvious impact from network delay. By configuring IDC and rack number for each physical node, it can be ensured that for a virtual node group, virtual nodes are composed of physical nodes from different IDC and different racks, thus implementing remote disaster recovery without other tools. As discussed above, TDengine writes using Leader and Follower processes. TDengine adopts asynchronous replication for data synchronization. This method can greatly improve write performance, with no obvious impact from network delay. By configuring IDC and rack number for each physical node, it can be ensured that for a virtual node group, virtual nodes are composed of physical nodes from different IDC and different racks, thus implementing remote disaster recovery without other tools.
On the other hand, TDengine supports dynamic modification of the replica number. Once the number of replicas increases, the newly added virtual nodes will immediately enter the data synchronization process. After synchronization is complete, added virtual nodes can provide services. In the synchronization process, master and other synchronized virtual nodes keep serving. With this feature, TDengine can provide IDC migration without service interruption. It is only necessary to add new physical nodes to the existing IDC cluster, and then remove old physical nodes after the data synchronization is completed. On the other hand, TDengine supports dynamic modification of the replica number. Once the number of replicas increases, the newly added virtual nodes will immediately enter the data synchronization process. After synchronization is complete, added virtual nodes can provide services. In the synchronization process, leader and other synchronized virtual nodes keep serving. With this feature, TDengine can provide IDC migration without service interruption. It is only necessary to add new physical nodes to the existing IDC cluster, and then remove old physical nodes after the data synchronization is completed.
However, the asynchronous replication has a very low probability scenario where data may be lost. The specific scenario is as follows: However, the asynchronous replication has a very low probability scenario where data may be lost. The specific scenario is as follows:
1. Master vnode has finished its 5-step operations, confirmed the success of writing to APP, and then goes down; 1. Leader vnode has finished its 5-step operations, confirmed the success of writing to APP, and then goes down;
2. Slave vnode receives the write request, then processing fails before writing to the log in Step 2; 2. Follower vnode receives the write request, then processing fails before writing to the log in Step 2;
3. Slave vnode will become the new master, thus losing one record. 3. Follower vnode will become the new leader, thus losing one record.
In theory, for asynchronous replication, there is no guarantee to prevent data loss. However, this is an extremely low probability scenario as described above. In theory, for asynchronous replication, there is no guarantee to prevent data loss. However, this is an extremely low probability scenario as described above.
Note: Remote disaster recovery and no-downtime IDC migration are only supported by Enterprise Edition. **Hint: This function is not available yet** Note: Remote disaster recovery and no-downtime IDC migration are only supported by Enterprise Edition. **Hint: This function is not available yet**
### Master/slave Selection ### Leader/follower Selection
Vnode maintains a version number. When memory data is persisted, the version number will also be persisted. For each data update operation, whether it is time-series data or metadata, this version number will be increased by one. Vnode maintains a version number. When memory data is persisted, the version number will also be persisted. For each data update operation, whether it is time-series data or metadata, this version number will be increased by one.
When a vnode starts, the roles (master, slave) are uncertain, and the data is in an unsynchronized state. It’s necessary to establish TCP connections with other nodes in the virtual node group and exchange status, including version and its own roles. Through the exchange, the system implements a master-selection process. The rules are as follows: When a vnode starts, the roles (leader, follower) are uncertain, and the data is in an unsynchronized state. It’s necessary to establish TCP connections with other nodes in the virtual node group and exchange status, including version and its own roles. Through the exchange, the system implements a leader-selection process. The rules are as follows:
1. If there’s only one replica, it’s always master 1. If there’s only one replica, it’s always leader
2. When all replicas are online, the one with latest version is master 2. When all replicas are online, the one with latest version is leader
3. Over half of online nodes are virtual nodes, and some virtual node is slave, it will automatically become master 3. Over half of online nodes are virtual nodes, and some virtual node is follower, it will automatically become leader
4. For 2 and 3, if multiple virtual nodes meet the requirement, the first vnode in virtual node group list will be selected as master. 4. For 2 and 3, if multiple virtual nodes meet the requirement, the first vnode in virtual node group list will be selected as leader.
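The rules above can be read as a small decision procedure. The following C sketch is illustrative only; the struct, its fields and the function are invented for this illustration and are not TDengine's actual election code:

```c
#include <stdbool.h>
#include <stdint.h>

// illustrative replica state, not a TDengine type
typedef struct {
  bool    online;
  bool    isFollower;  // current role reported by the peer
  int64_t version;     // persisted data version
} SReplicaState;

// returns the index of the elected leader, or -1 if no leader can be chosen
static int select_leader(const SReplicaState *r, int n) {
  if (n == 1) return 0;  // rule 1: a single replica is always the leader
  int online = 0;
  for (int i = 0; i < n; i++) online += r[i].online;
  if (online == n) {     // rule 2: all online -> the latest version wins
    int best = 0;
    for (int i = 1; i < n; i++)
      if (r[i].version > r[best].version) best = i;
    return best;         // rule 4: ties resolved by list order (first wins)
  }
  if (online * 2 > n) {  // rule 3: majority online -> a follower takes over
    for (int i = 0; i < n; i++)
      if (r[i].online && r[i].isFollower) return i;  // rule 4: first match
  }
  return -1;             // no quorum: the group stays unavailable
}
```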
### Synchronous Replication

For scenarios with strong data consistency requirements, asynchronous data replication is not applicable, because there is a small probability of data loss. So TDengine provides a synchronous replication mechanism. When creating a database, in addition to specifying the number of replicas, the user also needs to specify a new parameter, "quorum". If quorum is greater than one, then every time the leader forwards a message to the replicas, it needs to wait for quorum-1 reply confirmations before informing the application that the data has been successfully written on the followers. If quorum-1 reply confirmations are not received within a certain period of time, the leader vnode returns an error to the application.

With synchronous replication, system performance decreases and latency increases. Because metadata needs strong consistency, the default for data synchronization between mnodes is synchronous replication.
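As a minimal sketch of how an application would request this behavior, assuming the `REPLICA`/`QUORUM` database options described above (host and credentials are hypothetical):

```c
#include <stdio.h>
#include "taos.h"

int main() {
  TAOS *pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
  if (pConn == NULL) return -1;
  // replica 3: three copies of each vgroup; quorum 2: the leader waits for
  // one follower confirmation (quorum-1) before acknowledging each write
  TAOS_RES *pRes = taos_query(pConn, "create database if not exists demo replica 3 quorum 2");
  if (taos_errno(pRes) != 0) {
    printf("failed to create db, reason:%s\n", taos_errstr(pRes));
  }
  taos_free_result(pRes);
  taos_close(pConn);
  return 0;
}
```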
......
...@@ -379,11 +379,11 @@ We still use the hypothetical environment from Chapter 4. There are three measur

### Storage resource estimation

Assuming that the number of sensor devices that generate data and need to be stored is `n`, the frequency of data generation is `t` records per second, and the length of each record is `L` bytes, the scale of data generated per day is `86400 * n * t * L` bytes. Assuming the compression ratio is `C`, the daily data size is `(86400 * n * t * L)/C` bytes. The storage resources are estimated to accommodate the data scale for 1.5 years. In the production environment, the compression ratio C of TDengine is generally between 5 and 7.

With an additional 20% redundancy, you can calculate the required storage resources:

```matlab
(86400 * n * t * L) * (365 * 1.5) * (1+20%)/C
```

Substituting into the above formula, the raw data generated every year is 11.8 TB, without considering the tag information. Note that tag information is associated with each timeline in TDengine, not with every record, so the amount of data to be recorded is somewhat lower than the generated data, and the tag data can be ignored as a whole. Assuming a compression ratio of 5, the size of the retained data ends up being 2.56 TB.
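As a quick sanity check, the sketch below plugs hypothetical values into the formula; the device count, rate and record length are illustrative and are not the Chapter 4 figures:

```c
#include <stdio.h>

int main() {
  // hypothetical workload: 10,000 devices, 1 record per second, 200 bytes each
  double n = 10000, t = 1, L = 200;
  double C = 5;                                  // assumed compression ratio
  double perDay = 86400 * n * t * L;             // raw bytes generated per day
  double needed = perDay * 365 * 1.5 * 1.2 / C;  // 1.5 years, plus 20% redundancy
  printf("raw data per day: %.1f GB\n", perDay / 1e9);
  printf("storage required: %.1f TB\n", needed / 1e12);
  return 0;
}
```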
......
...@@ -109,7 +109,7 @@ taos>

It is also possible to access the REST interface provided by TDengine in the container from the host.

```
curl -L -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql
```

Output is like below:

...@@ -147,7 +147,7 @@ docker run -d --name tdengine-taosd -p 6030-6042:6030-6042 -p 6030-6042:6030-604

- Verify the REST interface:

```bash
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" 127.0.0.1:6041/rest/sql
```

Below is an example output:
......
...@@ -26,7 +26,7 @@ public class LineProtocolExample {

    private static void createDatabase(Connection conn) throws SQLException {
        try (Statement stmt = conn.createStatement()) {
            // the default precision is ms (millisecond), but we use us (microsecond) here.
            stmt.execute("CREATE DATABASE IF NOT EXISTS test PRECISION 'us'");
            stmt.execute("USE test");
        }
......
...@@ -8,13 +8,13 @@ TDengine uses a relational-style data model, so you need to create databases and tables. Therefore, for

## Creating a Database

Different types of data collection points often have different data characteristics, including the frequency of data collection, the length of data retention, the number of replicas, the size of data blocks, whether updates are allowed, and so on. So that TDengine can work with maximum efficiency in every scenario, it is recommended to create tables with different data characteristics in different databases, because each database can be configured with its own storage strategy. When creating a database, in addition to the standard SQL options, you can also specify parameters such as the retention period, the number of replicas, the cache size, the time precision, the maximum and minimum number of records in a file block, whether to compress, the number of days covered by one data file, and so on. For example:

```sql
CREATE DATABASE power KEEP 365 DURATION 10 BUFFER 16 VGROUPS 100 WAL 1;
```

The statement above creates a database named power whose data is kept for 365 days (data older than 365 days is deleted automatically), with one data file per 10 days, a 16 MB write buffer per vnode, 100 vgroups, and WAL enabled for writes without executing FSYNC. For detailed syntax and parameters, see the [Database Management](/taos-sql/database) section.

After the database is created, use the SQL command `USE` to switch to it, for example:

...@@ -27,7 +27,6 @@ USE power;

:::note

- Any table or supertable must belong to a database. Before creating a table, the database must be created first.
- When creating tables and inserting records, or when querying historical records, the timestamp must always be specified.

:::

...@@ -40,15 +39,11 @@ USE power;
CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);
```

As with creating a regular table, when creating a supertable you need to provide the table name (meters in the example) and the table schema, i.e. the definition of the data columns. The first column must be a timestamp (ts in the example); the other columns are the collected metrics (current, voltage and phase in the example), and their data types can be integer, float, string, and so on. In addition, you need to provide the schema of the tags (location and groupId in the example); tag data types can also be integer, float, string, and so on. Static attributes of a data collection point can usually serve as tags, such as its geographic location, device model, device group ID, or administrator ID. The tag schema can be added to, deleted from, or modified afterwards. For concrete definitions and details, see the [Supertable Management in TAOS SQL](/taos-sql/stable) section.

Each type of data collection point needs its own supertable, so an IoT system often has multiple supertables. For a power grid, supertables are needed for smart meters, transformers, busbars, switches, and so on. In IoT, one device may have multiple data collection points (for example, on a wind turbine, some collection points gather electrical parameters such as current and voltage, while others gather environmental parameters such as temperature, humidity and wind direction); in this case, multiple supertables need to be created for that type of device.

A supertable allows at most 4096 columns. If the number of metrics collected at a collection point exceeds 4096, multiple supertables are needed. A system can have multiple databases, and one database can contain one or more supertables.

## Creating Tables

...@@ -60,11 +55,6 @@ CREATE TABLE d1001 USING meters TAGS ("California.SanFrancisco", 2);

Here d1001 is the table name and meters is the supertable name, followed by the tag value "California.SanFrancisco" for the tag location and the tag value 2 for the tag groupId. Although tag values must be specified when the table is created, they can be modified afterwards. For details, see the [Table Management in TAOS SQL](/taos-sql/table) section.

TDengine recommends using the globally unique ID of the data collection point as the table name (such as the device serial number). For some scenarios there is no unique ID; in that case several IDs can be combined into one unique ID. It is not recommended to use a unique ID as a tag value.

### Automatic Table Creation
......
...@@ -52,15 +52,15 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6,

:::info

- To improve write efficiency, data should be written in batches. Generally speaking, the more records in one batch, the higher the insert efficiency. But one record cannot exceed 48 KB, and the total length of one SQL statement cannot exceed 1 MB.
- TDengine supports writing from multiple threads at the same time. To further improve write throughput, a client needs to open multiple threads and write concurrently. However, once the number of threads reaches a certain level, throughput cannot be improved any further and may even drop, because frequent thread switching brings extra overhead. The appropriate number of threads depends on many factors: the processing capability of the server, its concrete configuration, the database parameters, the schema of the data, the batch size of the writes, and so on. In general, the more capable the server and the client, the more concurrent writing threads can be supported; the more vgroups the database is configured with (while staying within the processing capability of the server), the more concurrent writes can be supported; and the simpler the schema, the more concurrent writes can be supported.

:::

:::warning

- For the same table, if the timestamp of a newly inserted record already exists, columns for which the new record specifies a value are overwritten with the new value, while columns without a specified value are unaffected (see the sketch after this note).
- The timestamp of written data must be greater than the current time minus the time specified by the parameter keep. If keep is configured as 3650 days, data earlier than 3650 days ago cannot be written. The timestamp of written data also cannot be greater than the current time plus the parameter duration. If duration is 2, data later than 2 days from now cannot be written.

:::
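A minimal sketch of the partial-overwrite behavior; the table, connection and timestamp are hypothetical, and only `taos_query` from the C API is used:

```c
#include "taos.h"

// assume table d1001(ts timestamp, current float, voltage int, phase float)
static void overwrite_demo(TAOS *pConn) {
  // first write: both current and voltage specified
  TAOS_RES *pRes = taos_query(pConn,
      "insert into d1001 (ts, current, voltage) values (1626164208000, 10.3, 219)");
  taos_free_result(pRes);
  // second write with the same timestamp, only current specified:
  // current becomes 12.6, while voltage keeps its old value 219
  pRes = taos_query(pConn, "insert into d1001 (ts, current) values (1626164208000, 12.6)");
  taos_free_result(pRes);
}
```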
...@@ -104,7 +104,7 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6,
### Writing with Parameter Binding

TDengine also provides a Prepare API that supports parameter binding. Similar to MySQL, these APIs currently only support the question mark `?` as the placeholder for the parameters to be bound. Writing data through the parameter-binding interface avoids the cost of SQL parsing, which in most cases significantly improves write performance.

Note that the parameter-binding feature is only available in connectors that use the native connection.
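A rough sketch of the binding flow through the C API (not the full official example): `taos_stmt_init` and `taos_stmt_prepare` are declared in `taos.h` as shown later in this commit, while the `TAOS_MULTI_BIND` layout and the remaining `taos_stmt_*` calls are assumptions based on the 3.0 native client and may differ across versions:

```c
#include <string.h>
#include "taos.h"

// minimal sketch, assuming table d1001(ts timestamp, current float, ...) exists
static int insert_bound(TAOS *conn) {
  TAOS_STMT *stmt = taos_stmt_init(conn);
  const char *sql = "insert into d1001 (ts, current) values (?, ?)";
  if (taos_stmt_prepare(stmt, sql, strlen(sql)) != 0) return -1;

  int64_t ts = 1626164208000;  // hypothetical timestamp, in ms
  float   current = 10.3f;

  TAOS_MULTI_BIND params[2] = {0};
  params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
  params[0].buffer = &ts;
  params[0].buffer_length = sizeof(ts);
  params[0].num = 1;
  params[1].buffer_type = TSDB_DATA_TYPE_FLOAT;
  params[1].buffer = &current;
  params[1].buffer_length = sizeof(current);
  params[1].num = 1;

  // bind one row, queue it, then execute the batch
  if (taos_stmt_bind_param(stmt, params) != 0) return -1;
  if (taos_stmt_add_batch(stmt) != 0) return -1;
  if (taos_stmt_execute(stmt) != 0) return -1;
  taos_stmt_close(stmt);
  return 0;
}
```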
......
...@@ -32,7 +32,6 @@ CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1];

- cacheLast: [details](/reference/config/#cachelast)
- replica: [details](/reference/config/#replica)
- quorum: [details](/reference/config/#quorum)
- comp: [details](/reference/config/#comp)
- precision: [details](/reference/config/#precision)

6. Note that all the parameters listed above can be set in the configuration file `taosd.cfg` as the defaults used when creating a database; parameters explicitly specified in `create database` override the settings in the configuration file.
......
...@@ -21,7 +21,7 @@ The RESTful interface does not depend on any TDengine library, so the client side does not need to install

The following example lists all databases; replace h1.taosdata.com and 6041 (the default value) with the FQDN and port number of the actually running TDengine service:

```bash
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" h1.taosdata.com:6041/rest/sql
```

A response like the following indicates that the verification passed:

...@@ -106,13 +106,13 @@ The BODY of the HTTP request is a complete SQL statement; the data in the SQL statement

Use `curl` to initiate an HTTP request with custom authentication, with the following syntax:

```bash
curl -L -H "Authorization: Basic <TOKEN>" -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name]
```

or

```bash
curl -L -u username:password -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name]
```

Here `TOKEN` is the Base64-encoded string of `{username}:{password}`; for example, `root:taosdata` encodes to `cm9vdDp0YW9zZGF0YQ==`.

...@@ -192,7 +192,7 @@ curl http://192.168.0.1:6041/rest/login/root/taosdata

- Query all records of table d1001 in the demo database:

```bash
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sql
```

Response:

...@@ -218,7 +218,7 @@ curl http://192.168.0.1:6041/rest/login/root/taosdata

- Create the database demo:

```bash
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "create database demo" 192.168.0.1:6041/rest/sql
```

Response:

...@@ -240,7 +240,7 @@ curl http://192.168.0.1:6041/rest/login/root/taosdata

When the HTTP request URL uses `/rest/sqlt`, timestamps in the returned result set are expressed as Unix timestamps, for example

```bash
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sqlt
```

Response:

...@@ -268,7 +268,7 @@ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001

When the HTTP request URL uses `/rest/sqlutc`, timestamps in the returned result set are expressed as UTC time strings, for example

```bash
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.t1" 192.168.0.1:6041/rest/sqlutc
```

Response:
......
...@@ -207,8 +207,8 @@ AllowWebSockets

You can use any client that supports the HTTP protocol to access the RESTful address `http://<fqdn>:6041/<APIEndPoint>` and write data in OpenTSDB-compatible format into TDengine. The endpoints are as follows:

```text
/opentsdb/v1/put/json/<db>
/opentsdb/v1/put/telnet/<db>
```

### collectd
......
...@@ -26,7 +26,6 @@ By default, all executable files of TDengine are located in the _/usr/local/taos/bin_ directory

- _remove.sh_: script to uninstall TDengine; execute it with caution. It is linked to the **rmtaos** command in the /usr/bin directory. It removes the TDengine installation directory /usr/local/taos but keeps /etc/taos, /var/lib/taos and /var/log/taos.
- _taosadapter_: server-side executable that provides the RESTful service and accepts write requests from a variety of other software
- _tarbitrator_: provides the arbitration function for two-node cluster deployments
- _TDinsight.sh_: script to download and install TDinsight
- _set_core.sh_: script to conveniently configure the system to generate core dump files, for debugging
- _taosd-dump-cfg.gdb_: gdb script that facilitates debugging of taosd.
......
...@@ -23,7 +23,7 @@ The logical structure of the TDengine distributed architecture is shown below:

**Virtual node (vnode):** To better support data sharding and load balancing, and to prevent data hot spots or skew, data nodes are virtualized into multiple virtual nodes (vnode; V2, V3, V4, etc. in the figure). Each vnode is a relatively independent unit of work and the basic unit of time-series data storage, with its own running threads, memory space and persistent storage path. A vnode contains a certain number of tables (data collection points). When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a data node depends on the hardware resources of the physical node where it runs. A vnode belongs to only one DB, but one DB can have multiple vnodes. Besides the stored time-series data, a vnode also holds the schemas and tag values of the tables it contains. A virtual node is uniquely identified in the system by the EP of its data node together with its VGroup ID, and is created and managed by the management node.

**Management node (mnode):** A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and for load balancing among nodes (M in the figure). The management node is also responsible for storing and managing metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple mnodes (at most 3) can be configured in a TDengine cluster, and they automatically form a virtual management node group (M0, M1, M2 in the figure). The mnodes use a master/slave mechanism for management and strong consistency for data synchronization; any data update operation can only be performed on the master. The creation of the mnode cluster is completed automatically by the system, without manual intervention. There is at most one mnode on each dnode, uniquely identified by the EP of its data node. Each dnode automatically learns, through internal message exchange, the EPs of all dnodes in the cluster on which mnodes run.

**Virtual node group (VGroup):** vnodes on different data nodes can form a virtual node group (vgroup) to ensure high system reliability. The vnodes in a group are managed in a master/slave fashion. Write operations can only be performed on the master vnode, and the system asynchronously replicates the data to the slave vnodes, so that one piece of data has copies on multiple physical nodes. The number of vnodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas is specified by the parameter replica when creating the DB; the default is 1. With the multi-replica feature of TDengine, the same high data reliability can be achieved without expensive storage devices such as disk arrays. Virtual node groups are created and managed by the management node, which assigns each a system-unique ID, the VGroup ID. If two vnodes have the same VGroup ID, they belong to the same group and their data are backups of each other. The number of vnodes in a group can be changed dynamically; a single vnode is allowed, meaning no data replication. The VGroup ID never changes; even if a virtual node group is deleted, its ID is not reused.
......
...@@ -367,10 +367,10 @@ WHERE ts>=1510560000 AND ts<=1515000009

### Storage resource estimation

Assuming the number of sensor devices generating data that must be stored is `n`, the data generation rate is `t` records per second, and each record is `L` bytes long, the data generated per day is `86400×n×t×L` bytes. Assuming a compression ratio of C, the daily data size is `(86400×n×t×L)/C` bytes. Storage is estimated to hold 1.5 years of data; in production, the compression ratio C of TDengine is generally between 5 and 7. Adding 20% redundancy to the final result, the required storage is:

```matlab
(86400×n×t×L)×(365×1.5)×(1+20%)/C
```

Plugging the parameters into the formula above, and leaving tag information out of consideration, the raw data generated per year is 11.8 TB. Note that tag information is associated with each timeline in TDengine, not with every record, so the amount of data actually recorded is somewhat smaller than the data generated, and this tag data can be ignored overall. Assuming a compression ratio of 5, the retained data ends up being 2.56 TB.
......
...@@ -108,7 +108,7 @@ taos>

You can also use curl on the host to access the TDengine server inside the Docker container through the RESTful port.

```
curl -L -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql
```

Example output:

...@@ -148,7 +148,7 @@ docker run -d --name tdengine-taosd -p 6030-6042:6030-6042 -p 6030-6042:6030-604

Use the curl command to verify that the RESTful interface works properly:

```bash
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" 127.0.0.1:6041/rest/sql
```

Example output:
......
...@@ -25,7 +25,7 @@ int32_t init_env() {
    return -1;
  }

  TAOS_RES* pRes = taos_query(pConn, "create database if not exists abc1 vgroups 2");
  if (taos_errno(pRes) != 0) {
    printf("error in create db, reason:%s\n", taos_errstr(pRes));
    return -1;

...@@ -68,6 +68,14 @@ int32_t init_env() {
    return -1;
  }
  taos_free_result(pRes);

  pRes = taos_query(pConn, "create table if not exists tu3 using st1 tags(3)");
  if (taos_errno(pRes) != 0) {
    printf("failed to create child table tu3, reason:%s\n", taos_errstr(pRes));
    return -1;
  }
  taos_free_result(pRes);

  return 0;
}

...@@ -90,9 +98,10 @@ int32_t create_stream() {
  /*const char* sql = "select min(k), max(k), sum(k) as sum_of_k from st1";*/
  /*const char* sql = "select sum(k) from tu1 interval(10m)";*/
  /*pRes = tmq_create_stream(pConn, "stream1", "out1", sql);*/
  pRes =
      taos_query(pConn,
                 "create stream stream1 trigger max_delay 10s into outstb as select _wstart, sum(k) from st1 partition "
                 "by tbname session(ts, 10s) ");
  if (taos_errno(pRes) != 0) {
    printf("failed to create stream stream1, reason:%s\n", taos_errstr(pRes));
    return -1;
......
...@@ -18,6 +18,7 @@
#include <string.h>
#include <time.h>
#include "taos.h"
#include <stdlib.h>

static int running = 1;

static void msg_process(TAOS_RES* msg) {

...@@ -26,6 +27,40 @@ static void msg_process(TAOS_RES* msg) {
  printf("topic: %s\n", tmq_get_topic_name(msg));
  printf("db: %s\n", tmq_get_db_name(msg));
  printf("vg: %d\n", tmq_get_vgroup_id(msg));
  if (tmq_get_res_type(msg) == TMQ_RES_TABLE_META) {
    tmq_raw_data* raw = tmq_get_raw_meta(msg);
    if (raw) {
      TAOS* pConn = taos_connect("192.168.1.86", "root", "taosdata", NULL, 0);
      if (pConn == NULL) {
        return;
      }
      TAOS_RES* pRes = taos_query(pConn, "create database if not exists abc1 vgroups 5");
      if (taos_errno(pRes) != 0) {
        printf("error in create db, reason:%s\n", taos_errstr(pRes));
        return;
      }
      taos_free_result(pRes);
      pRes = taos_query(pConn, "use abc1");
      if (taos_errno(pRes) != 0) {
        printf("error in use db, reason:%s\n", taos_errstr(pRes));
        return;
      }
      taos_free_result(pRes);
      int32_t ret = taos_write_raw_meta(pConn, raw);
      printf("write raw data: %s\n", tmq_err2str(ret));
      taos_close(pConn);
    }
    tmq_free_raw_meta(raw);
    char* result = tmq_get_json_meta(msg);
    if (result) {
      printf("meta result: %s\n", result);
    }
    tmq_free_json_meta(result);
    return;
  }
  while (1) {
    TAOS_ROW row = taos_fetch_row(msg);
    if (row == NULL) break;

...@@ -47,7 +82,7 @@ int32_t init_env() {
    return -1;
  }

  TAOS_RES* pRes = taos_query(pConn, "create database if not exists abc1 vgroups 5");
  if (taos_errno(pRes) != 0) {
    printf("error in create db, reason:%s\n", taos_errstr(pRes));
    return -1;

...@@ -61,34 +96,188 @@ }
  }
  taos_free_result(pRes);

  pRes = taos_query(pConn, "create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(16)) tags(t1 int, t3 nchar(8), t4 bool)");
  if (taos_errno(pRes) != 0) {
    printf("failed to create super table st1, reason:%s\n", taos_errstr(pRes));
    return -1;
  }
  taos_free_result(pRes);

  pRes = taos_query(pConn, "create table if not exists ct0 using st1 tags(1000, \"ttt\", true)");
  if (taos_errno(pRes) != 0) {
    printf("failed to create child table tu1, reason:%s\n", taos_errstr(pRes));
    return -1;
  }
  taos_free_result(pRes);

  pRes = taos_query(pConn, "insert into ct0 values(now, 1, 2, 'a')");
  if (taos_errno(pRes) != 0) {
    printf("failed to insert into ct0, reason:%s\n", taos_errstr(pRes));
    return -1;
  }
  taos_free_result(pRes);

  pRes = taos_query(pConn, "create table if not exists ct1 using st1(t1) tags(2000)");
  if (taos_errno(pRes) != 0) {
    printf("failed to create child table ct1, reason:%s\n", taos_errstr(pRes));
    return -1;
  }
  taos_free_result(pRes);

  pRes = taos_query(pConn, "create table if not exists ct2 using st1(t1) tags(NULL)");
  if (taos_errno(pRes) != 0) {
    printf("failed to create child table ct2, reason:%s\n", taos_errstr(pRes));
    return -1;
  }
  taos_free_result(pRes);

  pRes = taos_query(pConn, "insert into ct1 values(now, 3, 4, 'b')");
  if (taos_errno(pRes) != 0) {
    printf("failed to insert into ct1, reason:%s\n", taos_errstr(pRes));
    return -1;
  }
  taos_free_result(pRes);

  pRes = taos_query(pConn, "create table if not exists ct3 using st1(t1) tags(3000)");
  if (taos_errno(pRes) != 0) {
    printf("failed to create child table ct3, reason:%s\n", taos_errstr(pRes));
    return -1;
  }
  taos_free_result(pRes);

  pRes = taos_query(pConn, "insert into ct3 values(now, 5, 6, 'c')");
  if (taos_errno(pRes) != 0) {
    printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
    return -1;
  }
  taos_free_result(pRes);

  pRes = taos_query(pConn, "alter table st1 add column c4 bigint");
  if (taos_errno(pRes) != 0) {
    printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes));
    return -1;
  }
  taos_free_result(pRes);

  pRes = taos_query(pConn, "alter table st1 modify column c3 binary(64)");
  if (taos_errno(pRes) != 0) {
    printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes));
    return -1;
  }
  taos_free_result(pRes);

  pRes = taos_query(pConn, "alter table st1 add tag t2 binary(64)");
  if (taos_errno(pRes) != 0) {
    printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes));
    return -1;
  }
  taos_free_result(pRes);

  pRes = taos_query(pConn, "alter table ct3 set tag t1=5000");
  if (taos_errno(pRes) != 0) {
    printf("failed to alter child table ct3, reason:%s\n", taos_errstr(pRes));
    return -1;
  }
  taos_free_result(pRes);

  pRes = taos_query(pConn, "drop table ct3 ct1");
  if (taos_errno(pRes) != 0) {
    printf("failed to drop child table ct3, reason:%s\n", taos_errstr(pRes));
    return -1;
  }
  taos_free_result(pRes);

  pRes = taos_query(pConn, "drop table st1");
  if (taos_errno(pRes) != 0) {
    printf("failed to drop super table st1, reason:%s\n", taos_errstr(pRes));
    return -1;
  }
  taos_free_result(pRes);

  pRes = taos_query(pConn, "create table if not exists n1(ts timestamp, c1 int, c2 nchar(4))");
  if (taos_errno(pRes) != 0) {
    printf("failed to create normal table n1, reason:%s\n", taos_errstr(pRes));
    return -1;
  }
  taos_free_result(pRes);

  pRes = taos_query(pConn, "alter table n1 add column c3 bigint");
  if (taos_errno(pRes) != 0) {
    printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
    return -1;
  }
  taos_free_result(pRes);

  pRes = taos_query(pConn, "alter table n1 modify column c2 nchar(8)");
  if (taos_errno(pRes) != 0) {
    printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
    return -1;
  }
  taos_free_result(pRes);

  pRes = taos_query(pConn, "alter table n1 rename column c3 cc3");
  if (taos_errno(pRes) != 0) {
    printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
    return -1;
  }
  taos_free_result(pRes);

  pRes = taos_query(pConn, "alter table n1 comment 'hello'");
  if (taos_errno(pRes) != 0) {
    printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
    return -1;
  }
  taos_free_result(pRes);

  pRes = taos_query(pConn, "alter table n1 drop column c1");
  if (taos_errno(pRes) != 0) {
    printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
    return -1;
  }
  taos_free_result(pRes);

  pRes = taos_query(pConn, "drop table n1");
  if (taos_errno(pRes) != 0) {
    printf("failed to drop normal table n1, reason:%s\n", taos_errstr(pRes));
    return -1;
  }
  taos_free_result(pRes);

  pRes = taos_query(pConn, "create table jt(ts timestamp, i int) tags(t json)");
  if (taos_errno(pRes) != 0) {
    printf("failed to create super table jt, reason:%s\n", taos_errstr(pRes));
    return -1;
  }
  taos_free_result(pRes);

  pRes = taos_query(pConn, "create table jt1 using jt tags('{\"k1\":1, \"k2\":\"hello\"}')");
  if (taos_errno(pRes) != 0) {
    printf("failed to create super table jt, reason:%s\n", taos_errstr(pRes));
    return -1;
  }
  taos_free_result(pRes);

  pRes = taos_query(pConn, "create table jt2 using jt tags('')");
  if (taos_errno(pRes) != 0) {
    printf("failed to create super table jt2, reason:%s\n", taos_errstr(pRes));
    return -1;
  }
  taos_free_result(pRes);

  pRes = taos_query(pConn, "create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(16)) tags(t1 int, t3 nchar(8), t4 bool)");
  if (taos_errno(pRes) != 0) {
    printf("failed to create super table st1, reason:%s\n", taos_errstr(pRes));
    return -1;
  }
  taos_free_result(pRes);

  pRes = taos_query(pConn, "drop table st1");
  if (taos_errno(pRes) != 0) {
    printf("failed to drop super table st1, reason:%s\n", taos_errstr(pRes));
    return -1;
  }
  taos_free_result(pRes);

  return 0;
}

...@@ -107,8 +296,8 @@ int32_t create_topic() {
  }
  taos_free_result(pRes);

  pRes = taos_query(pConn, "create topic topic_ctb_column with meta as database abc1");
  /*pRes = taos_query(pConn, "create topic topic_ctb_column as select ts, c1, c2, c3 from st1");*/
  if (taos_errno(pRes) != 0) {
    printf("failed to create topic topic_ctb_column, reason:%s\n", taos_errstr(pRes));
    return -1;

...@@ -168,6 +357,9 @@ tmq_t* build_consumer() {
  tmq_conf_set(conf, "td.connect.pass", "taosdata");
  tmq_conf_set(conf, "msg.with.table.name", "true");
  tmq_conf_set(conf, "enable.auto.commit", "true");
  /*tmq_conf_set(conf, "experimental.snapshot.enable", "true");*/
  tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
  tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
  assert(tmq);

...@@ -192,7 +384,7 @@ void basic_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
  }
  int32_t cnt = 0;
  while (running) {
    TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, -1);
    if (tmqmessage) {
      cnt++;
      msg_process(tmqmessage);
......
Subproject commit 7ed7a97715388fa144718764d6bf20f9bfc29a12
...@@ -131,10 +131,10 @@ DLL_EXPORT int taos_options(TSDB_OPTION option, const void *arg, ...);
DLL_EXPORT setConfRet taos_set_config(const char *config);
DLL_EXPORT int        taos_init(void);
DLL_EXPORT TAOS      *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port);
DLL_EXPORT TAOS      *taos_connect_auth(const char *ip, const char *user, const char *auth, const char *db, uint16_t port);
DLL_EXPORT void       taos_close(TAOS *taos);

const char *taos_data_type(int type);

DLL_EXPORT TAOS_STMT *taos_stmt_init(TAOS *taos);
DLL_EXPORT int        taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length);

...@@ -164,6 +164,7 @@ DLL_EXPORT TAOS_RES *taos_query(TAOS *taos, const char *sql);
DLL_EXPORT TAOS_ROW taos_fetch_row(TAOS_RES *res);
DLL_EXPORT int      taos_result_precision(TAOS_RES *res);  // get the time precision of result
DLL_EXPORT void     taos_free_result(TAOS_RES *res);
DLL_EXPORT void     taos_kill_query(TAOS *taos);
DLL_EXPORT int      taos_field_count(TAOS_RES *res);
DLL_EXPORT int      taos_num_fields(TAOS_RES *res);
DLL_EXPORT int      taos_affected_rows(TAOS_RES *res);

...@@ -187,8 +188,8 @@ DLL_EXPORT TAOS_ROW *taos_result_block(TAOS_RES *res);
DLL_EXPORT const char *taos_get_server_info(TAOS *taos);
DLL_EXPORT const char *taos_get_client_info();

DLL_EXPORT const char *taos_errstr(TAOS_RES *res);
DLL_EXPORT int         taos_errno(TAOS_RES *res);

DLL_EXPORT void taos_query_a(TAOS *taos, const char *sql, __taos_async_fn_t fp, void *param);
DLL_EXPORT void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, void *param);

...@@ -252,10 +253,25 @@ DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_comm

/* -------------------------TMQ MSG HANDLE INTERFACE---------------------- */

enum tmq_res_t {
  TMQ_RES_INVALID = -1,
  TMQ_RES_DATA = 1,
  TMQ_RES_TABLE_META = 2,
};

typedef enum tmq_res_t tmq_res_t;
typedef struct tmq_raw_data tmq_raw_data;

DLL_EXPORT tmq_res_t     tmq_get_res_type(TAOS_RES *res);
DLL_EXPORT tmq_raw_data *tmq_get_raw_meta(TAOS_RES *res);
DLL_EXPORT int32_t       taos_write_raw_meta(TAOS *taos, tmq_raw_data *raw_meta);
DLL_EXPORT void          tmq_free_raw_meta(tmq_raw_data *rawMeta);
DLL_EXPORT char         *tmq_get_json_meta(TAOS_RES *res);  // returning NULL means error; the result must be freed with tmq_free_json_meta
DLL_EXPORT void          tmq_free_json_meta(char *jsonMeta);

DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
DLL_EXPORT int32_t     tmq_get_vgroup_id(TAOS_RES *res);
DLL_EXPORT const char *tmq_get_table_name(TAOS_RES *res);

/* ------------------------------ TMQ END -------------------------------- */
......
...@@ -25,55 +25,89 @@
extern "C" {
#endif

// TODO remove it
enum {
  TMQ_CONF__RESET_OFFSET__NONE = -3,
  TMQ_CONF__RESET_OFFSET__EARLIEAST = -2,
  TMQ_CONF__RESET_OFFSET__LATEST = -1,
};

// clang-format off
#define IS_META_MSG(x) ( \
     x == TDMT_VND_CREATE_STB \
  || x == TDMT_VND_ALTER_STB \
  || x == TDMT_VND_DROP_STB \
  || x == TDMT_VND_CREATE_TABLE \
  || x == TDMT_VND_ALTER_TABLE \
  || x == TDMT_VND_DROP_TABLE \
  )
// clang-format on

enum {
  TMQ_MSG_TYPE__DUMMY = 0,
  TMQ_MSG_TYPE__POLL_RSP,
  TMQ_MSG_TYPE__POLL_META_RSP,
  TMQ_MSG_TYPE__EP_RSP,
  TMQ_MSG_TYPE__END_RSP,
};

enum {
  STREAM_INPUT__DATA_SUBMIT = 1,
  STREAM_INPUT__DATA_BLOCK,
  // STREAM_INPUT__TABLE_SCAN,
  STREAM_INPUT__TQ_SCAN,
  STREAM_INPUT__DATA_RETRIEVE,
  STREAM_INPUT__TRIGGER,
  STREAM_INPUT__CHECKPOINT,
  STREAM_INPUT__DROP,
};

typedef enum EStreamType {
  STREAM_NORMAL = 1,
  STREAM_INVERT,
  STREAM_CLEAR,
  STREAM_INVALID,
  STREAM_GET_ALL,
  STREAM_DELETE_RESULT,
  STREAM_DELETE_DATA,
  STREAM_RETRIEVE,
  STREAM_PULL_DATA,
  STREAM_PULL_OVER,
} EStreamType;

typedef struct {
  SArray*   pGroupList;
  SArray*   pTableList;
  SHashObj* map;  // speedup acquire the tableQueryInfo by table uid
  bool      needSortTableByGroupId;
  void*     pTagCond;
  void*     pTagIndexCond;
  uint64_t  suid;
} STableListInfo;

#pragma pack(push, 1)
typedef struct SColumnDataAgg {
  int16_t colId;
  int16_t maxIndex;
  int16_t minIndex;
  int16_t numOfNull;
  int64_t sum;
  int64_t max;
  int64_t min;
} SColumnDataAgg;
#pragma pack(pop)

typedef struct SDataBlockInfo {
  STimeWindow window;
  int32_t     rows;  // todo hide this attribute
  int32_t     rowSize;
  uint64_t    uid;      // the uid of table, from which current data block comes
  uint16_t    blockId;  // block id, generated by physical planner
  uint64_t    groupId;  // no need to serialize
  int16_t     hasVarCol;
  uint32_t    capacity;
  // TODO: optimize and remove following
  int32_t     childId;  // used for stream, do not serialize
  EStreamType type;     // used for stream, do not serialize
  STimeWindow calWin;   // used for stream, do not serialize
} SDataBlockInfo;

typedef struct SSDataBlock {

...@@ -82,6 +116,21 @@ typedef struct SSDataBlock {
  SDataBlockInfo info;
} SSDataBlock;

enum {
  FETCH_TYPE__DATA = 1,
  FETCH_TYPE__META,
  FETCH_TYPE__NONE,
};

typedef struct {
  int8_t       fetchType;
  STqOffsetVal offset;
  union {
    SSDataBlock data;
    void*       meta;
  };
} SFetchRet;

typedef struct SVarColAttr {
  int32_t* offset;  // start position for each entry in the list
  uint32_t length;  // used buffer size that contain the valid data

...@@ -91,7 +140,7 @@ typedef struct SVarColAttr {
// pBlockAgg->numOfNull == info.rows, all data are null
// pBlockAgg->numOfNull == 0, no data are null.
typedef struct SColumnInfoData {
  SColumnInfo info;     // column info
  bool        hasNull;  // if current column data has null value.
  char*       pData;    // the corresponding block data in memory
  union {

...@@ -101,20 +150,17 @@ typedef struct SColumnInfoData {
} SColumnInfoData;

typedef struct SQueryTableDataCond {
  uint64_t     suid;
  int32_t      order;  // desc|asc order to iterate the data block
  int32_t      numOfCols;
  SColumnInfo* colList;
  int32_t      type;  // data block load type:
  // int32_t   numOfTWindows;
  STimeWindow  twindows;
  int64_t      startVersion;
  int64_t      endVersion;
} SQueryTableDataCond;

int32_t tEncodeDataBlock(void** buf, const SSDataBlock* pBlock);
void*   tDecodeDataBlock(const void* buf, SSDataBlock* pBlock);

...@@ -122,19 +168,6 @@ int32_t tEncodeDataBlocks(void** buf, const SArray* blocks);
void* tDecodeDataBlocks(const void* buf, SArray** blocks);
void  colDataDestroy(SColumnInfoData* pColData);

//======================================================================================================================
// the following structure shared by parser and executor
typedef struct SColumn {
......
...@@ -71,7 +71,8 @@ SEpSet getEpSet_s(SCorEpSet* pEpSet);
#define colDataGetData(p1_, r_) \
  ((IS_VAR_DATA_TYPE((p1_)->info.type)) ? colDataGetVarData(p1_, r_) : colDataGetNumData(p1_, r_))

#define IS_JSON_NULL(type, data) \
  ((type) == TSDB_DATA_TYPE_JSON && (*(data) == TSDB_DATA_TYPE_NULL || tTagIsJsonNull(data)))

static FORCE_INLINE bool colDataIsNull_s(const SColumnInfoData* pColumnInfoData, uint32_t row) {
  if (!pColumnInfoData->hasNull) {

...@@ -183,9 +184,10 @@ static FORCE_INLINE void colDataAppendDouble(SColumnInfoData* pColumnInfoData, u
int32_t getJsonValueLen(const char* data);

int32_t colDataAppend(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData, bool isNull);
int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, uint32_t numOfRow1, uint32_t* capacity,
                        const SColumnInfoData* pSource, uint32_t numOfRow2);
int32_t colDataAssign(SColumnInfoData* pColumnInfoData, const SColumnInfoData* pSource, int32_t numOfRows,
                      const SDataBlockInfo* pBlockInfo);
int32_t blockDataUpdateTsWindow(SSDataBlock* pDataBlock, int32_t tsColumnIndex);

int32_t colDataGetLength(const SColumnInfoData* pColumnInfoData, int32_t numOfRows);

...@@ -211,7 +213,7 @@ size_t blockDataGetSerialMetaSize(uint32_t numOfCols);
int32_t blockDataSort(SSDataBlock* pDataBlock, SArray* pOrderInfo);
int32_t blockDataSort_rv(SSDataBlock* pDataBlock, SArray* pOrderInfo, bool nullFirst);

int32_t colInfoDataEnsureCapacity(SColumnInfoData* pColumn, uint32_t numOfRows);
int32_t blockDataEnsureCapacity(SSDataBlock* pDataBlock, uint32_t numOfRows);

void colInfoDataCleanup(SColumnInfoData* pColumn, uint32_t numOfRows);

...@@ -220,14 +222,28 @@ void blockDataCleanup(SSDataBlock* pDataBlock);
size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize);

int32_t blockDataTrimFirstNRows(SSDataBlock* pBlock, size_t n);
int32_t blockDataKeepFirstNRows(SSDataBlock* pBlock, size_t n);

int32_t assignOneDataBlock(SSDataBlock* dst, const SSDataBlock* src);
int32_t copyDataBlock(SSDataBlock* dst, const SSDataBlock* src);

SSDataBlock* createDataBlock();
void*        blockDataDestroy(SSDataBlock* pBlock);
void         blockDataFreeRes(SSDataBlock* pBlock);
SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData);

int32_t blockDataAppendColInfo(SSDataBlock* pBlock, SColumnInfoData* pColInfoData);

SColumnInfoData  createColumnInfoData(int16_t type, int32_t bytes, int16_t colId);
SColumnInfoData* bdGetColumnInfoData(const SSDataBlock* pBlock, int32_t index);

void        blockEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_t numOfCols, int8_t needCompress);
const char* blockDecode(SSDataBlock* pBlock, int32_t numOfCols, int32_t numOfRows, const char* pData);

void blockDebugShowDataBlock(SSDataBlock* pBlock, const char* flag);
void blockDebugShowDataBlocks(const SArray* dataBlocks, const char* flag);
// for debug
char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** dumpBuf);

int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks, STSchema* pTSchema, int32_t vgId,
                                    tb_uid_t suid);

...@@ -235,7 +251,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks
char* buildCtbNameByGroupId(const char* stbName, uint64_t groupId);

static FORCE_INLINE int32_t blockGetEncodeSize(const SSDataBlock* pBlock) {
  return blockDataGetSerialMetaSize(taosArrayGetSize(pBlock->pDataBlock)) + blockDataGetSize(pBlock);
}

static FORCE_INLINE int32_t blockCompressColData(SColumnInfoData* pColRes, int32_t numOfRows, char* data,
......
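The hunks above replace the old blockCompressEncode/blockCompressDecode pair with blockEncode/blockDecode and introduce a small construction API for SSDataBlock. A minimal sketch of how the new constructors compose, using only signatures visible in these hunks; the header name and the TSDB_DATA_TYPE_INT constant are assumptions from the wider codebase, not part of this diff:

/* Sketch: build an empty one-column block with the API added in this commit. */
#include "tdatablock.h"  // assumed header for the declarations above

SSDataBlock* buildOneIntColBlock(void) {
  SSDataBlock*    pBlock = createDataBlock();                           // new constructor
  SColumnInfoData col = createColumnInfoData(TSDB_DATA_TYPE_INT, 4, 1); // type, bytes, colId
  blockDataAppendColInfo(pBlock, &col);   // attach the column descriptor
  blockDataEnsureCapacity(pBlock, 4096);  // pre-allocate room for 4096 rows
  return pBlock;                          // release later with blockDataDestroy()
}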
@@ -34,21 +34,44 @@ typedef struct SValue SValue;
 typedef struct SColVal SColVal;
 typedef struct STSRow2 STSRow2;
 typedef struct STSRowBuilder STSRowBuilder;
-typedef struct SColData SColData;
 typedef struct STagVal STagVal;
 typedef struct STag STag;
+// bitmap
+#define N1(n) ((1 << (n)) - 1)
+#define BIT1_SIZE(n) (((n)-1) / 8 + 1)
+#define BIT2_SIZE(n) (((n)-1) / 4 + 1)
+#define SET_BIT1(p, i, v)                            \
+  do {                                               \
+    (p)[(i) / 8] &= N1((i) % 8);                     \
+    (p)[(i) / 8] |= (((uint8_t)(v)) << (((i) % 8))); \
+  } while (0)
+#define GET_BIT1(p, i) (((p)[(i) / 8] >> ((i) % 8)) & ((uint8_t)1))
+#define SET_BIT2(p, i, v)                                \
+  do {                                                   \
+    p[(i) / 4] &= N1((i) % 4 * 2);                       \
+    (p)[(i) / 4] |= (((uint8_t)(v)) << (((i) % 4) * 2)); \
+  } while (0)
+#define GET_BIT2(p, i) (((p)[(i) / 4] >> (((i) % 4) * 2)) & ((uint8_t)3))
 // STSchema
 int32_t tTSchemaCreate(int32_t sver, SSchema *pSchema, int32_t nCols, STSchema **ppTSchema);
 void tTSchemaDestroy(STSchema *pTSchema);
 // SValue
+int32_t tPutValue(uint8_t *p, SValue *pValue, int8_t type);
+int32_t tGetValue(uint8_t *p, SValue *pValue, int8_t type);
 int tValueCmprFn(const SValue *pValue1, const SValue *pValue2, int8_t type);
+// SColVal
+#define COL_VAL_NONE(CID, TYPE) ((SColVal){.cid = (CID), .type = (TYPE), .isNone = 1})
+#define COL_VAL_NULL(CID, TYPE) ((SColVal){.cid = (CID), .type = (TYPE), .isNull = 1})
+#define COL_VAL_VALUE(CID, TYPE, V) ((SColVal){.cid = (CID), .type = (TYPE), .value = (V)})
 // STSRow2
-#define COL_VAL_NONE(CID) ((SColVal){.cid = (CID), .isNone = 1})
-#define COL_VAL_NULL(CID) ((SColVal){.cid = (CID), .isNull = 1})
-#define COL_VAL_VALUE(CID, V) ((SColVal){.cid = (CID), .value = (V)})
+#define TSROW_LEN(PROW, V) tGetI32v((uint8_t *)(PROW)->data, (V) ? &(V) : NULL)
+#define TSROW_SVER(PROW, V) tGetI32v((PROW)->data + TSROW_LEN(PROW, NULL), (V) ? &(V) : NULL)
 int32_t tTSRowNew(STSRowBuilder *pBuilder, SArray *pArray, STSchema *pTSchema, STSRow2 **ppRow);
 int32_t tTSRowClone(const STSRow2 *pRow, STSRow2 **ppRow);
@@ -56,7 +79,7 @@ void tTSRowFree(STSRow2 *pRow);
 void tTSRowGet(STSRow2 *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal);
 int32_t tTSRowToArray(STSRow2 *pRow, STSchema *pTSchema, SArray **ppArray);
 int32_t tPutTSRow(uint8_t *p, STSRow2 *pRow);
-int32_t tGetTSRow(uint8_t *p, STSRow2 *pRow);
+int32_t tGetTSRow(uint8_t *p, STSRow2 **ppRow);
 // STSRowBuilder
 #define tsRowBuilderInit() ((STSRowBuilder){0})
@@ -70,13 +93,15 @@ int32_t tGetTSRow(uint8_t *p, STSRow2 *pRow);
 // STag
 int32_t tTagNew(SArray *pArray, int32_t version, int8_t isJson, STag **ppTag);
 void tTagFree(STag *pTag);
+bool tTagIsJson(const void *pTag);
+bool tTagIsJsonNull(void *tagVal);
 bool tTagGet(const STag *pTag, STagVal *pTagVal);
 char *tTagValToData(const STagVal *pTagVal, bool isJson);
 int32_t tEncodeTag(SEncoder *pEncoder, const STag *pTag);
 int32_t tDecodeTag(SDecoder *pDecoder, STag **ppTag);
 int32_t tTagToValArray(const STag *pTag, SArray **ppArray);
 void debugPrintSTag(STag *pTag, const char *tag, int32_t ln);  // TODO: remove
-void debugCheckTags(STag *pTag); // TODO: remove
+int32_t parseJsontoTagData(const char *json, SArray *pTagVals, STag **ppTag, void *pMsgBuf);
 // STRUCT =================
 struct STColumn {
@@ -102,16 +127,16 @@ struct STSchema {
 #define TSROW_KV_SMALL ((uint8_t)0x10U)
 #define TSROW_KV_MID ((uint8_t)0x20U)
 #define TSROW_KV_BIG ((uint8_t)0x40U)
+#pragma pack(push, 1)
 struct STSRow2 {
   TSKEY ts;
   uint8_t flags;
-  int32_t sver;
-  uint32_t nData;
-  uint8_t *pData;
+  uint8_t data[];
 };
+#pragma pack(pop)
 struct STSRowBuilder {
-  STSRow2 tsRow;
+  // STSRow2 tsRow;
   int32_t szBuf;
   uint8_t *pBuf;
 };
@@ -138,6 +163,7 @@ struct SValue {
 struct SColVal {
   int16_t cid;
+  int8_t type;
   int8_t isNone;
   int8_t isNull;
   SValue value;
@@ -145,6 +171,7 @@ struct SColVal {
 #pragma pack(push, 1)
 struct STagVal {
+  // char colName[TSDB_COL_NAME_LEN]; // only used for tmq_get_meta
   union {
     int16_t cid;
     char *pKey;
@@ -170,12 +197,6 @@ struct STag {
 };
 #pragma pack(pop)
-struct SColData {
-  int16_t cid;
-  uint32_t nData;
-  uint8_t *pData;
-};
 #if 1 //================================================================================================================================================
 // Imported since 3.0 and use bitmap to demonstrate None/Null/Norm, while use Null/Norm below 3.0 without of bitmap.
 #define TD_SUPPORT_BITMAP
@@ -210,50 +231,6 @@ struct SColData {
     memcpy(varDataVal(x), (str), (_size)); \
   } while (0);
-// ----------------- TSDB COLUMN DEFINITION
-#define colType(col) ((col)->type)
-#define colFlags(col) ((col)->flags)
-#define colColId(col) ((col)->colId)
-#define colBytes(col) ((col)->bytes)
-#define colOffset(col) ((col)->offset)
-#define colSetType(col, t) (colType(col) = (t))
-#define colSetFlags(col, f) (colFlags(col) = (f))
-#define colSetColId(col, id) (colColId(col) = (id))
-#define colSetBytes(col, b) (colBytes(col) = (b))
-#define colSetOffset(col, o) (colOffset(col) = (o))
-// ----------------- TSDB SCHEMA DEFINITION
-#define schemaNCols(s) ((s)->numOfCols)
-#define schemaVersion(s) ((s)->version)
-#define schemaTLen(s) ((s)->tlen)
-#define schemaFLen(s) ((s)->flen)
-#define schemaVLen(s) ((s)->vlen)
-#define schemaColAt(s, i) ((s)->columns + i)
-#define tdFreeSchema(s) taosMemoryFreeClear((s))
-STSchema *tdDupSchema(const STSchema *pSchema);
-int32_t tdEncodeSchema(void **buf, STSchema *pSchema);
-void *tdDecodeSchema(void *buf, STSchema **pRSchema);
-static FORCE_INLINE int32_t comparColId(const void *key1, const void *key2) {
-  if (*(int16_t *)key1 > ((STColumn *)key2)->colId) {
-    return 1;
-  } else if (*(int16_t *)key1 < ((STColumn *)key2)->colId) {
-    return -1;
-  } else {
-    return 0;
-  }
-}
-static FORCE_INLINE STColumn *tdGetColOfID(STSchema *pSchema, int16_t colId) {
-  void *ptr = bsearch(&colId, (void *)pSchema->columns, schemaNCols(pSchema), sizeof(STColumn), comparColId);
-  if (ptr == NULL) return NULL;
-  return (STColumn *)ptr;
-}
 // ----------------- SCHEMA BUILDER DEFINITION
 typedef struct {
   int32_t tCols;
@@ -283,141 +260,6 @@ void tdResetTSchemaBuilder(STSchemaBuilder *pBuilder, schema_ver_t version)
 int32_t tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int8_t flags, col_id_t colId, col_bytes_t bytes);
 STSchema *tdGetSchemaFromBuilder(STSchemaBuilder *pBuilder);
-// ----------------- Semantic timestamp key definition
-// typedef uint64_t TKEY;
-#define TKEY TSKEY
-#define TKEY_INVALID UINT64_MAX
-#define TKEY_NULL TKEY_INVALID
-#define TKEY_NEGATIVE_FLAG (((TKEY)1) << 63)
-#define TKEY_VALUE_FILTER (~(TKEY_NEGATIVE_FLAG))
-#define TKEY_IS_NEGATIVE(tkey) (((tkey)&TKEY_NEGATIVE_FLAG) != 0)
-#define TKEY_IS_DELETED(tkey) (false)
-#define tdGetTKEY(key) (key)
-#define tdGetKey(tskey) (tskey)
-#define MIN_TS_KEY ((TSKEY)0x8000000000000001)
-#define MAX_TS_KEY ((TSKEY)0x7fffffffffffffff)
-#define TD_TO_TKEY(key) tdGetTKEY(((key) < MIN_TS_KEY) ? MIN_TS_KEY : (((key) > MAX_TS_KEY) ? MAX_TS_KEY : key))
-static FORCE_INLINE TKEY keyToTkey(TSKEY key) {
-  TSKEY lkey = key;
-  if (key > MAX_TS_KEY) {
-    lkey = MAX_TS_KEY;
-  } else if (key < MIN_TS_KEY) {
-    lkey = MIN_TS_KEY;
-  }
-  return tdGetTKEY(lkey);
-}
-static FORCE_INLINE int32_t tkeyComparFn(const void *tkey1, const void *tkey2) {
-  TSKEY key1 = tdGetKey(*(TKEY *)tkey1);
-  TSKEY key2 = tdGetKey(*(TKEY *)tkey2);
-  if (key1 < key2) {
-    return -1;
-  } else if (key1 > key2) {
-    return 1;
-  } else {
-    return 0;
-  }
-}
-// ----------------- Data column structure
-// SDataCol arrangement: data => bitmap => dataOffset
-typedef struct SDataCol {
-  int8_t type;             // column type
-  uint8_t bitmap : 1;      // 0: no bitmap if all rows are NORM, 1: has bitmap if has NULL/NORM rows
-  uint8_t reserve : 7;
-  int16_t colId;           // column ID
-  int32_t bytes;           // column data bytes defined
-  int32_t offset;          // data offset in a SDataRow (including the header size)
-  int32_t spaceSize;       // Total space size for this column
-  int32_t len;             // column data length
-  VarDataOffsetT *dataOff; // For binary and nchar data, the offset in the data column
-  void *pData;             // Actual data pointer
-  void *pBitmap;           // Bitmap pointer
-  TSKEY ts;                // only used in last NULL column
-} SDataCol;
-#define isAllRowsNull(pCol) ((pCol)->len == 0)
-#define isAllRowsNone(pCol) ((pCol)->len == 0)
-static FORCE_INLINE void dataColReset(SDataCol *pDataCol) { pDataCol->len = 0; }
-int32_t tdAllocMemForCol(SDataCol *pCol, int32_t maxPoints);
-void dataColInit(SDataCol *pDataCol, STColumn *pCol, int32_t maxPoints);
-int32_t dataColAppendVal(SDataCol *pCol, const void *value, int32_t numOfRows, int32_t maxPoints);
-void *dataColSetOffset(SDataCol *pCol, int32_t nEle);
-bool isNEleNull(SDataCol *pCol, int32_t nEle);
-typedef struct {
-  col_id_t maxCols;        // max number of columns
-  col_id_t numOfCols;      // Total number of cols
-  int32_t maxPoints;       // max number of points
-  int32_t numOfRows;
-  int32_t bitmapMode : 1;  // default is 0(2 bits), otherwise 1(1 bit)
-  int32_t sversion : 31;   // TODO: set sversion(not used yet)
-  SDataCol *cols;
-} SDataCols;
-static FORCE_INLINE bool tdDataColsIsBitmapI(SDataCols *pCols) { return pCols->bitmapMode != TSDB_BITMODE_DEFAULT; }
-static FORCE_INLINE void tdDataColsSetBitmapI(SDataCols *pCols) { pCols->bitmapMode = TSDB_BITMODE_ONE_BIT; }
-static FORCE_INLINE bool tdIsBitmapModeI(int8_t bitmapMode) { return bitmapMode != TSDB_BITMODE_DEFAULT; }
-#define keyCol(pCols) (&((pCols)->cols[0]))  // Key column
-#define dataColsTKeyAt(pCols, idx) ((TKEY *)(keyCol(pCols)->pData))[(idx)]  // the idx row of column-wised data
-#define dataColsKeyAt(pCols, idx) tdGetKey(dataColsTKeyAt(pCols, idx))
-static FORCE_INLINE TKEY dataColsTKeyFirst(SDataCols *pCols) {
-  if (pCols->numOfRows) {
-    return dataColsTKeyAt(pCols, 0);
-  } else {
-    return TKEY_INVALID;
-  }
-}
-static FORCE_INLINE TSKEY dataColsKeyAtRow(SDataCols *pCols, int32_t row) {
-  assert(row < pCols->numOfRows);
-  return dataColsKeyAt(pCols, row);
-}
-static FORCE_INLINE TSKEY dataColsKeyFirst(SDataCols *pCols) {
-  if (pCols->numOfRows) {
-    return dataColsKeyAt(pCols, 0);
-  } else {
-    return TSDB_DATA_TIMESTAMP_NULL;
-  }
-}
-static FORCE_INLINE TKEY dataColsTKeyLast(SDataCols *pCols) {
-  if (pCols->numOfRows) {
-    return dataColsTKeyAt(pCols, pCols->numOfRows - 1);
-  } else {
-    return TKEY_INVALID;
-  }
-}
-static FORCE_INLINE TSKEY dataColsKeyLast(SDataCols *pCols) {
-  if (pCols->numOfRows) {
-    return dataColsKeyAt(pCols, pCols->numOfRows - 1);
-  } else {
-    return TSDB_DATA_TIMESTAMP_NULL;
-  }
-}
-SDataCols *tdNewDataCols(int32_t maxCols, int32_t maxRows);
-void tdResetDataCols(SDataCols *pCols);
-int32_t tdInitDataCols(SDataCols *pCols, STSchema *pSchema);
-SDataCols *tdDupDataCols(SDataCols *pCols, bool keepData);
-SDataCols *tdFreeDataCols(SDataCols *pCols);
-int32_t tdMergeDataCols(SDataCols *target, SDataCols *source, int32_t rowsToMerge, int32_t *pOffset, bool update,
-                        TDRowVerT maxVer);
 #endif
 #ifdef __cplusplus
...
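Two details in the hunks above are worth calling out: the SColVal constructors (COL_VAL_NONE/NULL/VALUE) now carry the column type, and a small 1-bit/2-bit bitmap toolkit is added for tracking per-cell value states. Below is a self-contained demo of the 1-bit macros, with the definitions copied verbatim from the hunk. One observation from the macro body itself: SET_BIT1 clears the target bit and every higher bit of the same byte, so it is only safe when positions are written in ascending order, which appears to be the intended append-style usage.

#include <stdint.h>
#include <stdio.h>

#define N1(n) ((1 << (n)) - 1)
#define BIT1_SIZE(n) (((n)-1) / 8 + 1)
#define SET_BIT1(p, i, v)                            \
  do {                                               \
    (p)[(i) / 8] &= N1((i) % 8);                     \
    (p)[(i) / 8] |= (((uint8_t)(v)) << (((i) % 8))); \
  } while (0)
#define GET_BIT1(p, i) (((p)[(i) / 8] >> ((i) % 8)) & ((uint8_t)1))

int main(void) {
  uint8_t bitmap[BIT1_SIZE(8)] = {0};
  for (int i = 0; i < 8; i++) SET_BIT1(bitmap, i, i % 2);  // write in ascending order
  for (int i = 0; i < 8; i++) printf("%d", GET_BIT1(bitmap, i));
  printf("\n");  // prints 01010101
  return 0;
}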
@@ -64,10 +64,10 @@ extern int32_t tsNumOfMnodeQueryThreads;
 extern int32_t tsNumOfMnodeFetchThreads;
 extern int32_t tsNumOfMnodeReadThreads;
 extern int32_t tsNumOfVnodeQueryThreads;
+extern int32_t tsNumOfVnodeStreamThreads;
 extern int32_t tsNumOfVnodeFetchThreads;
 extern int32_t tsNumOfVnodeWriteThreads;
 extern int32_t tsNumOfVnodeSyncThreads;
-extern int32_t tsNumOfVnodeMergeThreads;
 extern int32_t tsNumOfQnodeQueryThreads;
 extern int32_t tsNumOfQnodeFetchThreads;
 extern int32_t tsNumOfSnodeSharedThreads;
@@ -137,6 +137,8 @@ extern bool tsSmlDataFormat;
 // internal
 extern int32_t tsTransPullupInterval;
 extern int32_t tsMqRebalanceInterval;
+extern int32_t tsTtlUnit;
+extern int32_t tsTtlPushInterval;
 #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize)
@@ -149,6 +151,7 @@ void taosCfgDynamicOptions(const char *option, const char *value);
 void taosAddDataDir(int32_t index, char *v1, int32_t level, int32_t primary);
 struct SConfig *taosGetCfg();
+int32_t taosSetCfg(SConfig *pCfg, char* name);
 #ifdef __cplusplus
 }
...
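The NEEDTO_COMPRESSS_MSG macro kept as context above gates payload compression on a configurable size threshold. A self-contained illustration; tsCompressMsgSize is defined locally here as a stand-in for the real global declared in this header (-1 disables compression):

#include <stdint.h>
#include <stdio.h>

static int32_t tsCompressMsgSize = 512 * 1024;  // example threshold in bytes

#define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize)

int main(void) {
  printf("small msg: %d\n", NEEDTO_COMPRESSS_MSG(1024));         // 0: below threshold
  printf("large msg: %d\n", NEEDTO_COMPRESSS_MSG(1024 * 1024));  // 1: compress
  return 0;
}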
This diff has been collapsed.
@@ -34,6 +34,7 @@ typedef enum {
   WRITE_QUEUE,
   APPLY_QUEUE,
   SYNC_QUEUE,
+  STREAM_QUEUE,
   QUEUE_MAX,
 } EQueueType;
...
@@ -81,6 +81,8 @@ enum {
   TD_DEF_MSG_TYPE(TDMT_DND_SERVER_STATUS, "server-status", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_DND_NET_TEST, "net-test", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_DND_CONFIG_DNODE, "config-dnode", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_DND_SYSTABLE_RETRIEVE, "dnode-retrieve", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_DND_MAX_MSG, "dnd-max", NULL, NULL)
   TD_NEW_MSG_SEG(TDMT_MND_MSG)
   TD_DEF_MSG_TYPE(TDMT_MND_CONNECT, "connect", NULL, NULL)
@@ -101,6 +103,7 @@ enum {
   TD_DEF_MSG_TYPE(TDMT_MND_ALTER_QNODE, "alter-qnode", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_DROP_QNODE, "drop-qnode", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_QNODE_LIST, "qnode-list", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_MND_DNODE_LIST, "dnode-list", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_CREATE_SNODE, "create-snode", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_ALTER_SNODE, "alter-snode", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_DROP_SNODE, "drop-snode", NULL, NULL)
@@ -113,6 +116,7 @@ enum {
   TD_DEF_MSG_TYPE(TDMT_MND_ALTER_DB, "alter-db", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_SYNC_DB, "sync-db", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_COMPACT_DB, "compact-db", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_MND_TRIM_DB, "trim-db", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_GET_DB_CFG, "get-db-cfg", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_VGROUP_LIST, "vgroup-list", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_CREATE_FUNC, "create-func", NULL, NULL)
@@ -131,6 +135,7 @@ enum {
   TD_DEF_MSG_TYPE(TDMT_MND_DROP_INDEX, "drop-index", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_GET_INDEX, "get-index", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_GET_TABLE_INDEX, "get-table-index", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_MND_TABLE_CFG, "table-cfg", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_CREATE_TOPIC, "create-topic", SMCreateTopicReq, SMCreateTopicRsp)
   TD_DEF_MSG_TYPE(TDMT_MND_ALTER_TOPIC, "alter-topic", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_DROP_TOPIC, "drop-topic", NULL, NULL)
@@ -144,13 +149,14 @@ enum {
   TD_DEF_MSG_TYPE(TDMT_MND_MQ_TIMER, "mq-tmr", SMTimerReq, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_TELEM_TIMER, "telem-tmr", SMTimerReq, SMTimerReq)
   TD_DEF_MSG_TYPE(TDMT_MND_TRANS_TIMER, "trans-tmr", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_MND_TTL_TIMER, "ttl-tmr", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_KILL_TRANS, "kill-trans", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_KILL_QUERY, "kill-query", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_KILL_CONN, "kill-conn", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_HEARTBEAT, "heartbeat", SClientHbBatchReq, SClientHbBatchRsp)
   TD_DEF_MSG_TYPE(TDMT_MND_STATUS, "status", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_SHOW, "show", NULL, NULL)
-  TD_DEF_MSG_TYPE(TDMT_MND_SYSTABLE_RETRIEVE, "retrieve", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_MND_SYSTABLE_RETRIEVE, "mnd-retrieve", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_GRANT, "grant", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_AUTH, "auth", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_APPLY_MSG, "mnode-apply", NULL, NULL)
@@ -158,36 +164,32 @@ enum {
   TD_DEF_MSG_TYPE(TDMT_MND_MERGE_VGROUP, "merge-vgroup", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_REDISTRIBUTE_VGROUP, "redistribute-vgroup", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_SPLIT_VGROUP, "split-vgroup", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_MND_SHOW_VARIABLES, "show-variables", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_MND_SERVER_VERSION, "server-version", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_MND_MAX_MSG, "mnd-max", NULL, NULL)
   TD_NEW_MSG_SEG(TDMT_VND_MSG)
   TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT, "submit", SSubmitReq, SSubmitRsp)
-  TD_DEF_MSG_TYPE(TDMT_VND_QUERY, "query", NULL, NULL)
-  TD_DEF_MSG_TYPE(TDMT_VND_QUERY_CONTINUE, "query-continue", NULL, NULL)
-  TD_DEF_MSG_TYPE(TDMT_VND_QUERY_HEARTBEAT, "query-heartbeat", NULL, NULL)
-  TD_DEF_MSG_TYPE(TDMT_VND_FETCH, "fetch", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_CREATE_TABLE, "create-table", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_ALTER_TABLE, "alter-table", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_DROP_TABLE, "drop-table", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_UPDATE_TAG_VAL, "update-tag-val", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_TABLE_META, "vnode-table-meta", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_TABLES_META, "vnode-tables-meta", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_VND_TABLE_CFG, "vnode-table-cfg", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_CREATE_STB, "vnode-create-stb", SVCreateStbReq, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_ALTER_STB, "vnode-alter-stb", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_DROP_STB, "vnode-drop-stb", SVDropStbReq, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_MQ_VG_CHANGE, "vnode-mq-vg-change", SMqRebVgReq, SMqRebVgRsp)
   TD_DEF_MSG_TYPE(TDMT_VND_MQ_VG_DELETE, "vnode-mq-vg-delete", SMqVDeleteReq, SMqVDeleteRsp)
   TD_DEF_MSG_TYPE(TDMT_VND_MQ_COMMIT_OFFSET, "vnode-commit-offset", STqOffset, STqOffset)
-  TD_DEF_MSG_TYPE(TDMT_VND_CANCEL_TASK, "vnode-cancel-task", NULL, NULL)
-  TD_DEF_MSG_TYPE(TDMT_VND_DROP_TASK, "vnode-drop-task", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_CREATE_TOPIC, "vnode-create-topic", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_ALTER_TOPIC, "vnode-alter-topic", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_DROP_TOPIC, "vnode-drop-topic", NULL, NULL)
-  TD_DEF_MSG_TYPE(TDMT_VND_EXPLAIN, "vnode-explain", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_SUBSCRIBE, "vnode-subscribe", SMVSubscribeReq, SMVSubscribeRsp)
   TD_DEF_MSG_TYPE(TDMT_VND_CONSUME, "vnode-consume", SMqPollReq, SMqDataBlkRsp)
   TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TRIGGER, "vnode-stream-trigger", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_STREAM_DISPATCH_WRITE, "vnode-stream-task-dispatch-write", NULL, NULL)
-  TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TASK_DROP, "vnode-stream-task-drop", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_CREATE_SMA, "vnode-create-sma", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_CANCEL_SMA, "vnode-cancel-sma", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_DROP_SMA, "vnode-drop-sma", NULL, NULL)
@@ -197,19 +199,33 @@ enum {
   TD_DEF_MSG_TYPE(TDMT_VND_ALTER_REPLICA, "alter-replica", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_ALTER_CONFIRM, "alter-confirm", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_ALTER_HASHRANGE, "alter-hashrange", NULL, NULL)
-  TD_DEF_MSG_TYPE(TDMT_VND_COMPACT, "compact", NULL, NULL)
-  TD_DEF_MSG_TYPE(TDMT_VND_DROP_TTL_TABLE, "drop-ttl-stb", NULL, NULL)
-  TD_NEW_MSG_SEG(TDMT_QND_MSG)
+  TD_DEF_MSG_TYPE(TDMT_VND_COMPACT, "vnode-compact", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_VND_DROP_TTL_TABLE, "vnode-drop-ttl-stb", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_VND_TRIM, "vnode-trim", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_VND_COMMIT, "vnode-commit", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_VND_MAX_MSG, "vnd-max", NULL, NULL)
+  TD_NEW_MSG_SEG(TDMT_SCH_MSG)
+  TD_DEF_MSG_TYPE(TDMT_SCH_QUERY, "query", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_SCH_MERGE_QUERY, "merge-query", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_SCH_QUERY_CONTINUE, "query-continue", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_SCH_QUERY_HEARTBEAT, "query-heartbeat", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_SCH_FETCH, "fetch", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_SCH_MERGE_FETCH, "merge-fetch", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_SCH_CANCEL_TASK, "cancel-task", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_SCH_DROP_TASK, "drop-task", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_SCH_EXPLAIN, "explain", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_SCH_LINK_BROKEN, "link-broken", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_SCH_MAX_MSG, "sch-max", NULL, NULL)
+  //shared by snode and vnode
   TD_NEW_MSG_SEG(TDMT_STREAM_MSG)
   TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_DEPLOY, "stream-task-deploy", SStreamTaskDeployReq, SStreamTaskDeployRsp)
+  TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_DROP, "stream-task-drop", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_RUN, "stream-task-run", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_DISPATCH, "stream-task-dispatch", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_RECOVER, "stream-task-recover", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_STREAM_RETRIEVE, "stream-retrieve", NULL, NULL)
-  TD_NEW_MSG_SEG(TDMT_SCH_MSG)
-  TD_DEF_MSG_TYPE(TDMT_SCH_LINK_BROKEN, "link-broken", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_STREAM_MAX_MSG, "stream-max", NULL, NULL)
   TD_NEW_MSG_SEG(TDMT_MON_MSG)
   TD_DEF_MSG_TYPE(TDMT_MON_MM_INFO, "monitor-minfo", NULL, NULL)
@@ -220,16 +236,19 @@ enum {
   TD_DEF_MSG_TYPE(TDMT_MON_VM_LOAD, "monitor-vload", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MON_MM_LOAD, "monitor-mload", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MON_QM_LOAD, "monitor-qload", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_MON_MAX_MSG, "monitor-max", NULL, NULL)
   TD_NEW_MSG_SEG(TDMT_SYNC_MSG)
   TD_DEF_MSG_TYPE(TDMT_SYNC_TIMEOUT, "sync-timer", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_SYNC_PING, "sync-ping", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_SYNC_PING_REPLY, "sync-ping-reply", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_SYNC_CLIENT_REQUEST, "sync-client-request", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_SYNC_CLIENT_REQUEST_BATCH, "sync-client-request-batch", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_SYNC_CLIENT_REQUEST_REPLY, "sync-client-request-reply", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_SYNC_REQUEST_VOTE, "sync-request-vote", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_SYNC_REQUEST_VOTE_REPLY, "sync-request-vote-reply", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_SYNC_APPEND_ENTRIES, "sync-append-entries", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_SYNC_APPEND_ENTRIES_BATCH, "sync-append-entries-batch", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_SYNC_APPEND_ENTRIES_REPLY, "sync-append-entries-reply", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_SYNC_NOOP, "sync-noop", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_SYNC_UNKNOWN, "sync-unknown", NULL, NULL)
@@ -242,6 +261,7 @@ enum {
   TD_DEF_MSG_TYPE(TDMT_SYNC_LEADER_TRANSFER, "sync-leader-transfer", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_SYNC_SET_MNODE_STANDBY, "set-mnode-standby", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_SYNC_SET_VNODE_STANDBY, "set-vnode-standby", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_SYNC_MAX_MSG, "sync-max", NULL, NULL)
 #if defined(TD_MSG_NUMBER_)
   TDMT_MAX
...
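TD_DEF_MSG_TYPE rows like the ones above are consumed several times under different macro definitions — the `#if defined(TD_MSG_NUMBER_)` branch visible at the end of the hunk is one such expansion. A self-contained sketch of that X-macro pattern; the table and helper macro names here are invented for illustration and are not TDengine's exact definitions:

#include <stdio.h>

#define MY_MSG_TABLE(X)     \
  X(MSG_CONNECT, "connect") \
  X(MSG_SUBMIT, "submit")

#define AS_ENUM(id, name) id,
#define AS_NAME(id, name) name,

enum { MY_MSG_TABLE(AS_ENUM) MSG_MAX };                  // 0, 1, and the count
static const char *msgName[] = {MY_MSG_TABLE(AS_NAME)};  // parallel name table

int main(void) {
  for (int i = 0; i < MSG_MAX; i++) printf("%d -> %s\n", i, msgName[i]);
  return 0;
}

Defining each message once and expanding the same table into enums, name arrays, and handler registries keeps the ids and strings from drifting apart, which is why the new *_MAX_MSG sentinels per segment are added in the table itself.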
@@ -37,6 +37,8 @@ typedef struct SName {
   char tname[TSDB_TABLE_NAME_LEN];
 } SName;
+SName* toName(int32_t acctId, const char* pDbName, const char* pTableName, SName* pName);
 int32_t tNameExtractFullName(const SName* name, char* dst);
 int32_t tNameLen(const SName* name);
@@ -57,12 +59,16 @@ void tNameAssign(SName* dst, const SName* src);
 int32_t tNameSetDbName(SName* dst, int32_t acctId, const char* dbName, size_t nameLen);
+int32_t tNameAddTbName(SName* dst, const char* tbName, size_t nameLen);
 int32_t tNameFromString(SName* dst, const char* str, uint32_t type);
 int32_t tNameSetAcctId(SName* dst, int32_t acctId);
 bool tNameDBNameEqual(SName* left, SName* right);
+bool tNameTbNameEqual(SName* left, SName* right);
 typedef struct {
   // input
   SArray* tags;  // element is SSmlKv
...
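A short sketch of the new SName helpers (toName, plus the tNameAddTbName/tNameTbNameEqual additions), using only signatures from this hunk; the header name, buffer size, and the printed format are illustrative assumptions:

#include <stdio.h>
#include "tname.h"  // assumed header for the declarations above

void nameExample(void) {
  SName name = {0};
  toName(1, "db1", "tb1", &name);         // acctId, db, table (helper added here)
  char fullName[64];                      // assumed large enough for this example
  tNameExtractFullName(&name, fullName);  // serialized form, e.g. "1.db1.tb1"
  printf("%s (len=%d)\n", fullName, tNameLen(&name));
}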
@@ -168,7 +168,7 @@ typedef struct {
 // N.B. If without STSchema, getExtendedRowSize() is used to get the rowMaxBytes and
 // (int32_t)ceil((double)nCols/TD_VTYPE_PARTS) should be added if TD_SUPPORT_BITMAP defined.
-#define TD_ROW_MAX_BYTES_FROM_SCHEMA(s) (schemaTLen(s) + TD_ROW_HEAD_LEN)
+#define TD_ROW_MAX_BYTES_FROM_SCHEMA(s) ((s)->tlen + TD_ROW_HEAD_LEN)
 #define TD_ROW_SET_INFO(r, i) (TD_ROW_INFO(r) = (i))
 #define TD_ROW_SET_TYPE(r, t) (TD_ROW_TYPE(r) = (t))
@@ -223,9 +223,10 @@ int32_t tdSetBitmapValTypeN(void *pBitmap, int16_t nEle, TDR
 static FORCE_INLINE int32_t tdGetBitmapValType(const void *pBitmap, int16_t colIdx, TDRowValT *pValType,
                                                int8_t bitmapMode);
 bool tdIsBitmapBlkNorm(const void *pBitmap, int32_t numOfBits, int8_t bitmapMode);
-int32_t tdAppendValToDataCol(SDataCol *pCol, TDRowValT valType, const void *val, int32_t numOfRows, int32_t maxPoints,
-                             int8_t bitmapMode, bool isMerge);
-int32_t tdAppendSTSRowToDataCol(STSRow *pRow, STSchema *pSchema, SDataCols *pCols, bool isMerge);
+// int32_t tdAppendValToDataCol(SDataCol *pCol, TDRowValT valType, const void *val, int32_t numOfRows, int32_t
+// maxPoints,
+//                              int8_t bitmapMode, bool isMerge);
+// int32_t tdAppendSTSRowToDataCol(STSRow *pRow, STSchema *pSchema, SDataCols *pCols, bool isMerge);
 int32_t tdGetBitmapValTypeII(const void *pBitmap, int16_t colIdx, TDRowValT *pValType);
 int32_t tdSetBitmapValTypeI(void *pBitmap, int16_t colIdx, TDRowValT valType);
@@ -299,6 +300,7 @@ int32_t tdAppendColValToRow(SRowBuilder *pBuilder, col_id_t colId, int8_t colTyp
 int32_t tdGetTpRowValOfCol(SCellVal *output, STSRow *pRow, void *pBitmap, int8_t colType, int32_t offset,
                            int16_t colIdx);
 int32_t tdGetKvRowValOfCol(SCellVal *output, STSRow *pRow, void *pBitmap, int32_t offset, int16_t colIdx);
+void tTSRowGetVal(STSRow *pRow, STSchema *pTSchema, int16_t iCol, SColVal *pColVal);
 typedef struct {
   STSchema *pSchema;
@@ -312,16 +314,14 @@ typedef struct {
 void tdSTSRowIterReset(STSRowIter *pIter, STSRow *pRow);
 void tdSTSRowIterInit(STSRowIter *pIter, STSchema *pSchema);
+int32_t tdSTSRowNew(SArray *pArray, STSchema *pTSchema, STSRow **ppRow);
 bool tdSTSRowGetVal(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCellVal *pVal);
 bool tdGetTpRowDataOfCol(STSRowIter *pIter, col_type_t colType, int32_t offset, SCellVal *pVal);
 bool tdGetKvRowValOfColEx(STSRowIter *pIter, col_id_t colId, col_type_t colType, col_id_t *nIdx, SCellVal *pVal);
 bool tdSTSRowIterNext(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCellVal *pVal);
-STSRow *mergeTwoRows(void *buffer, STSRow *row1, STSRow *row2, STSchema *pSchema1, STSchema *pSchema2);
-int32_t tdGetColDataOfRow(SCellVal *pVal, SDataCol *pCol, int32_t row, int8_t bitmapMode);
 bool tdSTpRowGetVal(STSRow *pRow, col_id_t colId, col_type_t colType, int32_t flen, uint32_t offset, col_id_t colIdx,
                     SCellVal *pVal);
-bool tdSKvRowGetVal(STSRow *pRow, col_id_t colId, uint32_t offset, col_id_t colIdx, SCellVal *pVal);
-int32_t dataColGetNEleLen(SDataCol *pDataCol, int32_t rows, int8_t bitmapMode);
+bool tdSKvRowGetVal(STSRow *pRow, col_id_t colId, col_id_t colIdx, SCellVal *pVal);
 void tdSCellValPrint(SCellVal *pVal, int8_t colType);
 void tdSRowPrint(STSRow *row, STSchema *pSchema, const char *tag);
...
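With the SDataCols append helpers commented out and mergeTwoRows/tdGetColDataOfRow removed above, the iterator API is the remaining way to read cells out of an STSRow. A sketch of that pattern under stated assumptions — the header name, column id 1, and TSDB_DATA_TYPE_BIGINT are illustrative, not part of this diff:

#include "trow.h"  // assumed header for the STSRow declarations above

void printOneCell(STSRow *pRow, STSchema *pSchema) {
  STSRowIter iter = {0};
  tdSTSRowIterInit(&iter, pSchema);  // bind the schema once
  tdSTSRowIterReset(&iter, pRow);    // point the iterator at this row
  SCellVal val = {0};
  if (tdSTSRowIterNext(&iter, 1, TSDB_DATA_TYPE_BIGINT, &val)) {
    tdSCellValPrint(&val, TSDB_DATA_TYPE_BIGINT);  // debug printer kept by this hunk
  }
}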
@@ -72,6 +72,8 @@ static FORCE_INLINE int64_t taosGetTimestampToday(int32_t precision) {
 }
 int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision);
+int64_t taosTimeSub(int64_t t, int64_t duration, char unit, int32_t precision);
 int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precision);
 int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision);
...
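taosTimeSub joins the existing taosTimeAdd, so calendar-aware timestamp arithmetic can now be round-tripped. A sketch with illustrative arguments — the unit character 'd' and precision value 0 (milliseconds) are assumptions about the calling convention, not part of this hunk:

#include "ttime.h"  // assumed header name

int64_t shiftAndRestore(int64_t tsMs) {
  int64_t plus  = taosTimeAdd(tsMs, 1, 'd', 0);  // one day forward
  int64_t minus = taosTimeSub(plus, 1, 'd', 0);  // new helper: one day back
  return minus;                                  // expected to equal tsMs
}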
@@ -16,247 +16,261 @@
 #ifndef _TD_COMMON_TOKEN_H_
 #define _TD_COMMON_TOKEN_H_
 #define TK_OR 1
 #define TK_AND 2
 #define TK_UNION 3
 #define TK_ALL 4
 #define TK_MINUS 5
 #define TK_EXCEPT 6
 #define TK_INTERSECT 7
 #define TK_NK_BITAND 8
 #define TK_NK_BITOR 9
 #define TK_NK_LSHIFT 10
 #define TK_NK_RSHIFT 11
 #define TK_NK_PLUS 12
 #define TK_NK_MINUS 13
 #define TK_NK_STAR 14
 #define TK_NK_SLASH 15
 #define TK_NK_REM 16
 #define TK_NK_CONCAT 17
 #define TK_CREATE 18
 #define TK_ACCOUNT 19
 #define TK_NK_ID 20
 #define TK_PASS 21
 #define TK_NK_STRING 22
 #define TK_ALTER 23
 #define TK_PPS 24
 #define TK_TSERIES 25
 #define TK_STORAGE 26
 #define TK_STREAMS 27
 #define TK_QTIME 28
 #define TK_DBS 29
 #define TK_USERS 30
 #define TK_CONNS 31
 #define TK_STATE 32
 #define TK_USER 33
-#define TK_PRIVILEGE 34
+#define TK_ENABLE 34
-#define TK_DROP 35
+#define TK_NK_INTEGER 35
-#define TK_GRANT 36
+#define TK_SYSINFO 36
-#define TK_ON 37
+#define TK_DROP 37
-#define TK_TO 38
+#define TK_GRANT 38
-#define TK_REVOKE 39
+#define TK_ON 39
-#define TK_FROM 40
+#define TK_TO 40
-#define TK_NK_COMMA 41
+#define TK_REVOKE 41
-#define TK_READ 42
+#define TK_FROM 42
-#define TK_WRITE 43
+#define TK_NK_COMMA 43
-#define TK_NK_DOT 44
+#define TK_READ 44
-#define TK_DNODE 45
+#define TK_WRITE 45
-#define TK_PORT 46
+#define TK_NK_DOT 46
-#define TK_NK_INTEGER 47
+#define TK_DNODE 47
-#define TK_DNODES 48
+#define TK_PORT 48
-#define TK_NK_IPTOKEN 49
+#define TK_DNODES 49
-#define TK_LOCAL 50
+#define TK_NK_IPTOKEN 50
-#define TK_QNODE 51
+#define TK_LOCAL 51
-#define TK_BNODE 52
+#define TK_QNODE 52
-#define TK_SNODE 53
+#define TK_BNODE 53
-#define TK_MNODE 54
+#define TK_SNODE 54
-#define TK_DATABASE 55
+#define TK_MNODE 55
-#define TK_USE 56
+#define TK_DATABASE 56
-#define TK_IF 57
+#define TK_USE 57
-#define TK_NOT 58
+#define TK_FLUSH 58
-#define TK_EXISTS 59
+#define TK_TRIM 59
-#define TK_BUFFER 60
+#define TK_IF 60
-#define TK_CACHELAST 61
+#define TK_NOT 61
-#define TK_COMP 62
+#define TK_EXISTS 62
-#define TK_DURATION 63
+#define TK_BUFFER 63
-#define TK_NK_VARIABLE 64
+#define TK_CACHELAST 64
-#define TK_FSYNC 65
+#define TK_CACHELASTSIZE 65
-#define TK_MAXROWS 66
+#define TK_COMP 66
-#define TK_MINROWS 67
+#define TK_DURATION 67
-#define TK_KEEP 68
+#define TK_NK_VARIABLE 68
-#define TK_PAGES 69
+#define TK_FSYNC 69
-#define TK_PAGESIZE 70
+#define TK_MAXROWS 70
-#define TK_PRECISION 71
+#define TK_MINROWS 71
-#define TK_REPLICA 72
+#define TK_KEEP 72
-#define TK_STRICT 73
+#define TK_PAGES 73
-#define TK_WAL 74
+#define TK_PAGESIZE 74
-#define TK_VGROUPS 75
+#define TK_PRECISION 75
-#define TK_SINGLE_STABLE 76
+#define TK_REPLICA 76
-#define TK_RETENTIONS 77
+#define TK_STRICT 77
-#define TK_SCHEMALESS 78
+#define TK_WAL 78
-#define TK_NK_COLON 79
+#define TK_VGROUPS 79
-#define TK_TABLE 80
+#define TK_SINGLE_STABLE 80
-#define TK_NK_LP 81
+#define TK_RETENTIONS 81
-#define TK_NK_RP 82
+#define TK_SCHEMALESS 82
-#define TK_STABLE 83
+#define TK_NK_COLON 83
-#define TK_ADD 84
+#define TK_TABLE 84
-#define TK_COLUMN 85
+#define TK_NK_LP 85
-#define TK_MODIFY 86
+#define TK_NK_RP 86
-#define TK_RENAME 87
+#define TK_STABLE 87
-#define TK_TAG 88
+#define TK_ADD 88
-#define TK_SET 89
+#define TK_COLUMN 89
-#define TK_NK_EQ 90
+#define TK_MODIFY 90
-#define TK_USING 91
+#define TK_RENAME 91
-#define TK_TAGS 92
+#define TK_TAG 92
-#define TK_COMMENT 93
+#define TK_SET 93
-#define TK_BOOL 94
+#define TK_NK_EQ 94
-#define TK_TINYINT 95
+#define TK_USING 95
-#define TK_SMALLINT 96
+#define TK_TAGS 96
-#define TK_INT 97
+#define TK_COMMENT 97
-#define TK_INTEGER 98
+#define TK_BOOL 98
-#define TK_BIGINT 99
+#define TK_TINYINT 99
-#define TK_FLOAT 100
+#define TK_SMALLINT 100
-#define TK_DOUBLE 101
+#define TK_INT 101
-#define TK_BINARY 102
+#define TK_INTEGER 102
-#define TK_TIMESTAMP 103
+#define TK_BIGINT 103
-#define TK_NCHAR 104
+#define TK_FLOAT 104
-#define TK_UNSIGNED 105
+#define TK_DOUBLE 105
-#define TK_JSON 106
+#define TK_BINARY 106
-#define TK_VARCHAR 107
+#define TK_TIMESTAMP 107
-#define TK_MEDIUMBLOB 108
+#define TK_NCHAR 108
-#define TK_BLOB 109
+#define TK_UNSIGNED 109
-#define TK_VARBINARY 110
+#define TK_JSON 110
-#define TK_DECIMAL 111
+#define TK_VARCHAR 111
-#define TK_MAX_DELAY 112
+#define TK_MEDIUMBLOB 112
-#define TK_WATERMARK 113
+#define TK_BLOB 113
-#define TK_ROLLUP 114
+#define TK_VARBINARY 114
-#define TK_TTL 115
+#define TK_DECIMAL 115
-#define TK_SMA 116
+#define TK_MAX_DELAY 116
-#define TK_FIRST 117
+#define TK_WATERMARK 117
-#define TK_LAST 118
+#define TK_ROLLUP 118
-#define TK_SHOW 119
+#define TK_TTL 119
-#define TK_DATABASES 120
+#define TK_SMA 120
-#define TK_TABLES 121
+#define TK_FIRST 121
-#define TK_STABLES 122
+#define TK_LAST 122
-#define TK_MNODES 123
+#define TK_SHOW 123
-#define TK_MODULES 124
+#define TK_DATABASES 124
-#define TK_QNODES 125
+#define TK_TABLES 125
-#define TK_FUNCTIONS 126
+#define TK_STABLES 126
-#define TK_INDEXES 127
+#define TK_MNODES 127
-#define TK_ACCOUNTS 128
+#define TK_MODULES 128
-#define TK_APPS 129
+#define TK_QNODES 129
-#define TK_CONNECTIONS 130
+#define TK_FUNCTIONS 130
-#define TK_LICENCE 131
+#define TK_INDEXES 131
-#define TK_GRANTS 132
+#define TK_ACCOUNTS 132
-#define TK_QUERIES 133
+#define TK_APPS 133
-#define TK_SCORES 134
+#define TK_CONNECTIONS 134
-#define TK_TOPICS 135
+#define TK_LICENCE 135
-#define TK_VARIABLES 136
+#define TK_GRANTS 136
-#define TK_BNODES 137
+#define TK_QUERIES 137
-#define TK_SNODES 138
+#define TK_SCORES 138
-#define TK_CLUSTER 139
+#define TK_TOPICS 139
-#define TK_TRANSACTIONS 140
+#define TK_VARIABLES 140
-#define TK_DISTRIBUTED 141
+#define TK_BNODES 141
-#define TK_LIKE 142
+#define TK_SNODES 142
-#define TK_INDEX 143
+#define TK_CLUSTER 143
-#define TK_FULLTEXT 144
+#define TK_TRANSACTIONS 144
-#define TK_FUNCTION 145
+#define TK_DISTRIBUTED 145
-#define TK_INTERVAL 146
+#define TK_CONSUMERS 146
-#define TK_TOPIC 147
+#define TK_SUBSCRIPTIONS 147
-#define TK_AS 148
+#define TK_LIKE 148
-#define TK_CONSUMER 149
+#define TK_INDEX 149
-#define TK_GROUP 150
+#define TK_FUNCTION 150
-#define TK_DESC 151
+#define TK_INTERVAL 151
-#define TK_DESCRIBE 152
+#define TK_TOPIC 152
-#define TK_RESET 153
+#define TK_AS 153
-#define TK_QUERY 154
+#define TK_WITH 154
-#define TK_CACHE 155
+#define TK_META 155
-#define TK_EXPLAIN 156
+#define TK_CONSUMER 156
-#define TK_ANALYZE 157
+#define TK_GROUP 157
-#define TK_VERBOSE 158
+#define TK_DESC 158
-#define TK_NK_BOOL 159
+#define TK_DESCRIBE 159
-#define TK_RATIO 160
+#define TK_RESET 160
-#define TK_NK_FLOAT 161
+#define TK_QUERY 161
-#define TK_COMPACT 162
+#define TK_CACHE 162
-#define TK_VNODES 163
+#define TK_EXPLAIN 163
-#define TK_IN 164
+#define TK_ANALYZE 164
-#define TK_OUTPUTTYPE 165
+#define TK_VERBOSE 165
-#define TK_AGGREGATE 166
+#define TK_NK_BOOL 166
-#define TK_BUFSIZE 167
+#define TK_RATIO 167
-#define TK_STREAM 168
+#define TK_NK_FLOAT 168
-#define TK_INTO 169
+#define TK_COMPACT 169
-#define TK_TRIGGER 170
+#define TK_VNODES 170
-#define TK_AT_ONCE 171
+#define TK_IN 171
-#define TK_WINDOW_CLOSE 172
+#define TK_OUTPUTTYPE 172
-#define TK_KILL 173
+#define TK_AGGREGATE 173
-#define TK_CONNECTION 174
+#define TK_BUFSIZE 174
-#define TK_TRANSACTION 175
+#define TK_STREAM 175
-#define TK_BALANCE 176
+#define TK_INTO 176
-#define TK_VGROUP 177
+#define TK_TRIGGER 177
-#define TK_MERGE 178
+#define TK_AT_ONCE 178
-#define TK_REDISTRIBUTE 179
+#define TK_WINDOW_CLOSE 179
-#define TK_SPLIT 180
+#define TK_IGNORE 180
-#define TK_SYNCDB 181
+#define TK_EXPIRED 181
-#define TK_DELETE 182
+#define TK_KILL 182
-#define TK_NULL 183
+#define TK_CONNECTION 183
-#define TK_NK_QUESTION 184
+#define TK_TRANSACTION 184
-#define TK_NK_ARROW 185
+#define TK_BALANCE 185
-#define TK_ROWTS 186
+#define TK_VGROUP 186
-#define TK_TBNAME 187
+#define TK_MERGE 187
-#define TK_QSTARTTS 188
+#define TK_REDISTRIBUTE 188
-#define TK_QENDTS 189
+#define TK_SPLIT 189
-#define TK_WSTARTTS 190
+#define TK_SYNCDB 190
-#define TK_WENDTS 191
+#define TK_DELETE 191
-#define TK_WDURATION 192
+#define TK_INSERT 192
-#define TK_CAST 193
+#define TK_NULL 193
-#define TK_NOW 194
+#define TK_NK_QUESTION 194
-#define TK_TODAY 195
+#define TK_NK_ARROW 195
-#define TK_TIMEZONE 196
+#define TK_ROWTS 196
-#define TK_COUNT 197
+#define TK_TBNAME 197
-#define TK_LAST_ROW 198
+#define TK_QSTART 198
-#define TK_BETWEEN 199
+#define TK_QEND 199
-#define TK_IS 200
+#define TK_QDURATION 200
-#define TK_NK_LT 201
+#define TK_WSTART 201
-#define TK_NK_GT 202
+#define TK_WEND 202
-#define TK_NK_LE 203
+#define TK_WDURATION 203
-#define TK_NK_GE 204
+#define TK_CAST 204
-#define TK_NK_NE 205
+#define TK_NOW 205
-#define TK_MATCH 206
+#define TK_TODAY 206
-#define TK_NMATCH 207
+#define TK_TIMEZONE 207
-#define TK_CONTAINS 208
+#define TK_CLIENT_VERSION 208
-#define TK_JOIN 209
+#define TK_SERVER_VERSION 209
-#define TK_INNER 210
+#define TK_SERVER_STATUS 210
-#define TK_SELECT 211
+#define TK_CURRENT_USER 211
-#define TK_DISTINCT 212
+#define TK_COUNT 212
-#define TK_WHERE 213
+#define TK_LAST_ROW 213
-#define TK_PARTITION 214
+#define TK_BETWEEN 214
-#define TK_BY 215
+#define TK_IS 215
-#define TK_SESSION 216
+#define TK_NK_LT 216
-#define TK_STATE_WINDOW 217
+#define TK_NK_GT 217
-#define TK_SLIDING 218
+#define TK_NK_LE 218
-#define TK_FILL 219
+#define TK_NK_GE 219
-#define TK_VALUE 220
+#define TK_NK_NE 220
-#define TK_NONE 221
+#define TK_MATCH 221
-#define TK_PREV 222
+#define TK_NMATCH 222
-#define TK_LINEAR 223
+#define TK_CONTAINS 223
-#define TK_NEXT 224
+#define TK_JOIN 224
-#define TK_HAVING 225
+#define TK_INNER 225
-#define TK_RANGE 226
+#define TK_SELECT 226
-#define TK_EVERY 227
+#define TK_DISTINCT 227
-#define TK_ORDER 228
+#define TK_WHERE 228
-#define TK_SLIMIT 229
+#define TK_PARTITION 229
-#define TK_SOFFSET 230
+#define TK_BY 230
-#define TK_LIMIT 231
+#define TK_SESSION 231
-#define TK_OFFSET 232
+#define TK_STATE_WINDOW 232
-#define TK_ASC 233
+#define TK_SLIDING 233
-#define TK_NULLS 234
+#define TK_FILL 234
-#define TK_ID 235
+#define TK_VALUE 235
-#define TK_NK_BITNOT 236
+#define TK_NONE 236
-#define TK_INSERT 237
+#define TK_PREV 237
-#define TK_VALUES 238
+#define TK_LINEAR 238
-#define TK_IMPORT 239
+#define TK_NEXT 239
-#define TK_NK_SEMI 240
+#define TK_HAVING 240
-#define TK_FILE 241
+#define TK_RANGE 241
+#define TK_EVERY 242
+#define TK_ORDER 243
+#define TK_SLIMIT 244
+#define TK_SOFFSET 245
+#define TK_LIMIT 246
+#define TK_OFFSET 247
+#define TK_ASC 248
+#define TK_NULLS 249
+#define TK_ID 250
+#define TK_NK_BITNOT 251
+#define TK_VALUES 252
+#define TK_IMPORT 253
+#define TK_NK_SEMI 254
+#define TK_FILE 255
 #define TK_NK_SPACE 300
 #define TK_NK_COMMENT 301
...
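The wholesale renumbering above is what adding keywords (ENABLE, SYSINFO, FLUSH, TRIM, WITH, META, and so on) to a generated token table looks like: every later id shifts by the number of insertions before it. The practical rule it illustrates is to branch on the symbolic names, never on the numeric values. A minimal sketch; the include name is an assumption based on the _TD_COMMON_TOKEN_H_ guard:

#include <stdint.h>
#include "ttokendef.h"  // assumed file name for this token header

const char *tokenClass(int32_t tokenId) {
  switch (tokenId) {
    case TK_SELECT: return "query";  // 211 before this commit, 226 after
    case TK_INSERT: return "write";  // 237 before this commit, 192 after
    default:        return "other";
  }
}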
@@ -52,6 +52,7 @@ SMnode *mndOpen(const char *path, const SMnodeOpt *pOption);
  * @param pMnode The mnode object to close.
  */
 void mndClose(SMnode *pMnode);
+void mndPreClose(SMnode *pMnode);
 /**
  * @brief Start mnode
@@ -73,10 +74,12 @@ void mndStop(SMnode *pMnode);
  * @param pMnode The mnode object.
  * @param pCluster
  * @param pVgroup
+ * @param pStbInfo
  * @param pGrant
  * @return int32_t 0 for success, -1 for failure.
  */
-int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pCluster, SMonVgroupInfo *pVgroup, SMonGrantInfo *pGrant);
+int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgroupInfo *pVgroupInfo,
+                          SMonStbInfo *pStbInfo, SMonGrantInfo *pGrantInfo);
 /**
  * @brief Get mnode loads for status msg.
...
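mndGetMonitorInfo gains an SMonStbInfo out-parameter alongside the existing cluster/vgroup/grant ones. A sketch of the widened call; the header name, the zero-initialized locals, and the wrapper function are illustrative:

#include <stdint.h>
#include "mnode.h"  // assumed header for the declarations above

int32_t collectMonitorInfo(SMnode *pMnode) {
  SMonClusterInfo cluster = {0};
  SMonVgroupInfo  vgroup  = {0};
  SMonStbInfo     stb     = {0};  // new out-parameter in this commit
  SMonGrantInfo   grant   = {0};
  return mndGetMonitorInfo(pMnode, &cluster, &vgroup, &stb, &grant);  // 0 on success, -1 on failure
}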
@@ -68,8 +68,10 @@ typedef struct SCatalogReq {
   SArray* pIndex;       // element is index name
   SArray* pUser;        // element is SUserAuthInfo
   SArray* pTableIndex;  // element is SNAME
+  SArray* pTableCfg;    // element is SNAME
   bool qNodeRequired;   // valid qnode
   bool dNodeRequired;   // valid dnode
+  bool svrVerRequired;
   bool forceUpdate;
 } SCatalogReq;
@@ -79,17 +81,19 @@ typedef struct SMetaRes {
 } SMetaRes;
 typedef struct SMetaData {
   SArray* pDbVgroup;    // pRes = SArray<SVgroupInfo>*
   SArray* pDbCfg;       // pRes = SDbCfgInfo*
   SArray* pDbInfo;      // pRes = SDbInfo*
   SArray* pTableMeta;   // pRes = STableMeta*
   SArray* pTableHash;   // pRes = SVgroupInfo*
   SArray* pTableIndex;  // pRes = SArray<STableIndexInfo>*
   SArray* pUdfList;     // pRes = SFuncInfo*
   SArray* pIndex;       // pRes = SIndexInfo*
   SArray* pUser;        // pRes = bool*
   SArray* pQnodeList;   // pRes = SArray<SQueryNodeLoad>*
+  SArray* pTableCfg;    // pRes = STableCfg*
   SArray* pDnodeList;   // pRes = SArray<SEpSet>*
+  SMetaRes* pSvrVer;    // pRes = char*
 } SMetaData;
 typedef struct SCatalogCfg {
@@ -143,14 +147,6 @@ int32_t catalogInit(SCatalogCfg* cfg);
  */
 int32_t catalogGetHandle(uint64_t clusterId, SCatalog** catalogHandle);
-/**
- * Free a cluster's all catalog info, usually it's not necessary, until the application is closing.
- * no current or future usage should be guaranteed by application
- * @param pCatalog (input, NO more usage)
- * @return error code
- */
-void catalogFreeHandle(SCatalog* pCatalog);
 int32_t catalogGetDBVgVersion(SCatalog* pCtg, const char* dbFName, int32_t* version, int64_t* dbId, int32_t* tableNum);
 /**
@@ -266,7 +262,7 @@ int32_t catalogGetTableHashVgroup(SCatalog* pCatalog, SRequestConnInfo* pConn, c
  */
 int32_t catalogGetAllMeta(SCatalog* pCatalog, SRequestConnInfo* pConn, const SCatalogReq* pReq, SMetaData* pRsp);
-int32_t catalogAsyncGetAllMeta(SCatalog* pCtg, SRequestConnInfo* pConn, uint64_t reqId, const SCatalogReq* pReq, catalogCallback fp, void* param, int64_t* jobId);
+int32_t catalogAsyncGetAllMeta(SCatalog* pCtg, SRequestConnInfo* pConn, const SCatalogReq* pReq, catalogCallback fp, void* param, int64_t* jobId);
 int32_t catalogGetQnodeList(SCatalog* pCatalog, SRequestConnInfo* pConn, SArray* pQnodeList);
@@ -284,6 +280,8 @@ int32_t catalogGetIndexMeta(SCatalog* pCtg, SRequestConnInfo* pConn, const char*
 int32_t catalogGetTableIndex(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName, SArray** pRes);
+int32_t catalogRefreshGetTableCfg(SCatalog* pCtg, SRequestConnInfo *pConn, const SName* pTableName, STableCfg** pCfg);
 int32_t catalogUpdateTableIndex(SCatalog* pCtg, STableIndexRsp *pRsp);
 int32_t catalogGetUdfInfo(SCatalog* pCtg, SRequestConnInfo* pConn, const char* funcName, SFuncInfo* pInfo);
@@ -294,6 +292,8 @@ int32_t catalogUpdateUserAuthInfo(SCatalog* pCtg, SGetUserAuthRsp* pAuth);
 int32_t catalogUpdateVgEpSet(SCatalog* pCtg, const char* dbFName, int32_t vgId, SEpSet *epSet);
+int32_t catalogGetServerVersion(SCatalog* pCtg, SRequestConnInfo *pConn, char** pVersion);
 int32_t ctgdLaunchAsyncCall(SCatalog* pCtg, SRequestConnInfo* pConn, uint64_t reqId, bool forceUpdate);
 int32_t catalogClearCache(void);
...
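SCatalogReq and SMetaData grow matching table-config and server-version slots, so a single catalogGetAllMeta round trip can now also fetch table configs and the server version. A sketch touching only fields visible in this hunk; the header name, wrapper, and its arguments are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include "catalog.h"  // assumed header for the declarations above

int32_t fetchMetaWithTableCfg(SCatalog *pCtg, SRequestConnInfo *pConn, SArray *tableNames) {
  SCatalogReq req = {0};
  req.pTableCfg      = tableNames;  // element is SNAME, mirroring pTableIndex
  req.svrVerRequired = true;        // also ask for the server version
  SMetaData rsp = {0};
  int32_t code = catalogGetAllMeta(pCtg, pConn, &req, &rsp);
  // on success, rsp.pTableCfg holds STableCfg* results and rsp.pSvrVer the version string
  return code;
}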
@@ -13,6 +13,9 @@
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
+#ifndef TDENGINE_COMMAND_H
+#define TDENGINE_COMMAND_H
 #include "cmdnodes.h"
 #include "tmsg.h"
 #include "plannodes.h"
@@ -27,4 +30,4 @@ int32_t qExecExplainEnd(SExplainCtx *pCtx, SRetrieveTableRsp **pRsp);
 int32_t qExplainUpdateExecInfo(SExplainCtx *pCtx, SExplainRsp *pRspMsg, int32_t groupId, SRetrieveTableRsp **pRsp);
 void qExplainFreeCtx(SExplainCtx *pCtx);
+#endif
@@ -33,7 +33,7 @@ struct SDataSink;
 struct SSDataBlock;

 typedef struct SDeleterRes {
-  uint64_t uid;
+  uint64_t suid;
   SArray*  uidList;
   int64_t  skey;
   int64_t  ekey;
@@ -41,9 +41,14 @@ typedef struct SDeleterRes {
 } SDeleterRes;

 typedef struct SDeleterParam {
+  uint64_t suid;
   SArray*  pUidList;
 } SDeleterParam;

+typedef struct SInserterParam {
+  SReadHandle* readHandle;
+} SInserterParam;
+
 typedef struct SDataSinkStat {
   uint64_t cachedSize;
 } SDataSinkStat;
@@ -95,7 +100,7 @@ void dsEndPut(DataSinkHandle handle, uint64_t useconds);
  * @param handle
  * @param pLen data length
  */
-void dsGetDataLength(DataSinkHandle handle, int32_t* pLen, bool* pQueryEnd);
+void dsGetDataLength(DataSinkHandle handle, int64_t* pLen, bool* pQueryEnd);

 /**
  * Get data, the caller needs to allocate data memory.
...
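Note: dsGetDataLength() widens pLen from int32_t to int64_t, so buffered results past 2 GB no longer overflow the counter. A hedged polling sketch; the handle setup and the actual fetch call are assumed from the surrounding data-sink API:

// Returns true once the producer signals end-of-query; a sketch only.
static bool demoDrainOnce(DataSinkHandle handle) {
  int64_t len = 0;               // was int32_t before this change
  bool    queryEnd = false;
  dsGetDataLength(handle, &len, &queryEnd);
  if (len > 0) {
    // the caller allocates len bytes and fetches the block through the
    // data-sink get-data entry point (elided here).
  }
  return queryEnd;
}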
@@ -30,31 +30,40 @@ struct SRpcMsg;
 struct SSubplan;

 typedef struct SReadHandle {
-  void*   reader;
+  void*   tqReader;
   void*   meta;
   void*   config;
   void*   vnode;
   void*   mnd;
   SMsgCb* pMsgCb;
+  int64_t version;
+  bool    initMetaReader;
+  bool    initTableReader;
+  bool    initTqReader;
 } SReadHandle;

+enum {  // in queue mode, data streams are seperated by msg
+  STREAM_DATA_TYPE_SUBMIT_BLOCK = 1,
+  STREAM_DATA_TYPE_SSDATA_BLOCK = 2,
+};
+
 typedef enum {
   OPTR_EXEC_MODEL_BATCH = 0x1,
   OPTR_EXEC_MODEL_STREAM = 0x2,
+  OPTR_EXEC_MODEL_QUEUE = 0x3,
 } EOPTR_EXEC_MODEL;

 /**
- * Create the exec task for streaming mode
+ * Create the exec task for stream mode
+ * @param pMsg
+ * @param SReadHandle
+ * @return
+ */
+qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers);
+
+/**
+ * Create the exec task for queue mode
  * @param pMsg
- * @param streamReadHandle
+ * @param SReadHandle
  * @return
  */
-qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, void* streamReadHandle);
+qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers);

 /**
  * Set the input data block for the stream scan.
@@ -104,8 +113,8 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId,
  * @param tversion
  * @return
  */
-int32_t qGetQueriedTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* tableName, int32_t* sversion,
+int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* tableName, int32_t* sversion,
                                     int32_t* tversion);

 /**
  * The main task execution function, including query on both table and multiple tables,
@@ -131,13 +140,6 @@ int32_t qKillTask(qTaskInfo_t tinfo);
  */
 int32_t qAsyncKillTask(qTaskInfo_t tinfo);

-/**
- * return whether query is completed or not
- * @param tinfo
- * @return
- */
-int32_t qIsTaskCompleted(qTaskInfo_t tinfo);
-
 /**
  * destroy query info structure
  * @param qHandle
@@ -159,7 +161,7 @@ int64_t qGetQueriedTableUid(qTaskInfo_t tinfo);
  */
 int32_t qGetQualifiedTableIdList(void* pTableList, const char* tagCond, int32_t tagCondLen, SArray* pTableIdList);

-void qProcessFetchRsp(void* parent, struct SRpcMsg* pMsg, struct SEpSet* pEpSet);
+void qProcessRspMsg(void* parent, struct SRpcMsg* pMsg, struct SEpSet* pEpSet);

 int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, int32_t* resNum, SExplainExecInfo** pRes);
@@ -167,6 +169,29 @@ int32_t qSerializeTaskStatus(qTaskInfo_t tinfo, char** pOutput, int32_t* len);
 int32_t qDeserializeTaskStatus(qTaskInfo_t tinfo, const char* pInput, int32_t len);

+/**
+ * return the scan info, in the form of tuple of two items, including table uid and current timestamp
+ * @param tinfo
+ * @param uid
+ * @param ts
+ * @return
+ */
+int32_t qGetStreamScanStatus(qTaskInfo_t tinfo, uint64_t* uid, int64_t* ts);
+
+int32_t qStreamPrepareTsdbScan(qTaskInfo_t tinfo, uint64_t uid, int64_t ts);
+
+int32_t qStreamPrepareScan(qTaskInfo_t tinfo, const STqOffsetVal* pOffset);
+
+int32_t qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset);
+
+void* qStreamExtractMetaMsg(qTaskInfo_t tinfo);
+
+void* qExtractReaderFromStreamScanner(void* scanner);
+
+int32_t qExtractStreamScanner(qTaskInfo_t tinfo, void** scanner);
+
+int32_t qStreamInput(qTaskInfo_t tinfo, void* pItem);
+
 #ifdef __cplusplus
 }
 #endif
...
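Note: queue mode is split out of stream mode here (OPTR_EXEC_MODEL_QUEUE, qCreateQueueExecTaskInfo). A sketch of the intended call sequence; pVnode and pSubplanMsg are placeholders and the surrounding setup is assumed:

SReadHandle handle = {0};
handle.vnode        = pVnode;    // vnode context, assumed available in the caller
handle.initTqReader = true;      // ask the executor to create its own tq reader
qTaskInfo_t pTask = qCreateQueueExecTaskInfo(pSubplanMsg, &handle);
if (pTask != NULL) {
  STqOffsetVal offset = {0};     // requested start position, e.g. from the consumer
  qStreamPrepareScan(pTask, &offset);
}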
@@ -172,24 +172,13 @@ typedef struct tExprNode {
 void tExprTreeDestroy(tExprNode *pNode, void (*fp)(void *));

-typedef struct SAggFunctionInfo {
-  char     name[FUNCTIONS_NAME_MAX_LENGTH];
-  int8_t   type;        // Scalar function or aggregation function
-  uint32_t functionId;  // Function Id
-  int8_t   sFunctionId; // Transfer function for super table query
-  uint16_t status;
-  bool (*init)(SqlFunctionCtx *pCtx, struct SResultRowEntryInfo* pResultCellInfo); // setup the execute environment
-  void (*addInput)(SqlFunctionCtx *pCtx);
-  // finalizer must be called after all exec has been executed to generated final result.
-  void (*finalize)(SqlFunctionCtx *pCtx);
-  void (*combine)(SqlFunctionCtx *pCtx);
-  int32_t (*dataReqFunc)(SqlFunctionCtx *pCtx, STimeWindow* w, int32_t colId);
-} SAggFunctionInfo;
+typedef enum {
+  SHOULD_FREE_COLDATA    = 0x1, // the newly created column data needs to be destroyed.
+  DELEGATED_MGMT_COLDATA = 0x2, // input column data should not be released.
+} ECOLDATA_MGMT_TYPE_E;

 struct SScalarParam {
+  ECOLDATA_MGMT_TYPE_E type;
   SColumnInfoData *columnData;
   SHashObj        *pHashFilter;
   int32_t          hashValueType;
...
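Note: SScalarParam now records who owns columnData. A hypothetical cleanup helper showing the intent of the flag; the helper itself is not from this commit:

static void demoReleaseScalarParam(SScalarParam* pParam) {
  if (pParam->type == SHOULD_FREE_COLDATA) {
    colDataDestroy(pParam->columnData);  // newly created column data: free it here
  }
  // DELEGATED_MGMT_COLDATA: the input side keeps ownership, so leave the data alone.
}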
@@ -34,6 +34,7 @@ typedef enum EFunctionType {
   FUNCTION_TYPE_ELAPSED,
   FUNCTION_TYPE_IRATE,
   FUNCTION_TYPE_LAST_ROW,
+  FUNCTION_TYPE_LAST_ROWT, // TODO: removed
   FUNCTION_TYPE_MAX,
   FUNCTION_TYPE_MIN,
   FUNCTION_TYPE_MODE,
@@ -105,7 +106,7 @@ typedef enum EFunctionType {
   // system function
   FUNCTION_TYPE_DATABASE = 3000,
   FUNCTION_TYPE_CLIENT_VERSION,
-  FUNCTION_TYPE_SERVER_SERSION,
+  FUNCTION_TYPE_SERVER_VERSION,
   FUNCTION_TYPE_SERVER_STATUS,
   FUNCTION_TYPE_CURRENT_USER,
   FUNCTION_TYPE_USER,
@@ -113,16 +114,20 @@ typedef enum EFunctionType {
   // pseudo column function
   FUNCTION_TYPE_ROWTS = 3500,
   FUNCTION_TYPE_TBNAME,
-  FUNCTION_TYPE_QSTARTTS,
-  FUNCTION_TYPE_QENDTS,
-  FUNCTION_TYPE_WSTARTTS,
-  FUNCTION_TYPE_WENDTS,
+  FUNCTION_TYPE_QSTART,
+  FUNCTION_TYPE_QEND,
+  FUNCTION_TYPE_QDURATION,
+  FUNCTION_TYPE_WSTART,
+  FUNCTION_TYPE_WEND,
   FUNCTION_TYPE_WDURATION,

   // internal function
   FUNCTION_TYPE_SELECT_VALUE,
   FUNCTION_TYPE_BLOCK_DIST,      // block distribution aggregate function
   FUNCTION_TYPE_BLOCK_DIST_INFO, // block distribution pseudo column function
+  FUNCTION_TYPE_TO_COLUMN,
+  FUNCTION_TYPE_GROUP_KEY,
+  FUNCTION_TYPE_CACHE_LAST_ROW,

   // distributed splitting functions
   FUNCTION_TYPE_APERCENTILE_PARTIAL = 4000,
@@ -190,6 +195,10 @@ bool fmIsForbidWindowFunc(int32_t funcId);
 bool fmIsForbidGroupByFunc(int32_t funcId);
 bool fmIsIntervalInterpoFunc(int32_t funcId);
 bool fmIsInterpFunc(int32_t funcId);
+bool fmIsLastRowFunc(int32_t funcId);
+bool fmIsSystemInfoFunc(int32_t funcId);
+bool fmIsImplicitTsFunc(int32_t funcId);
+bool fmIsClientPseudoColumnFunc(int32_t funcId);

 int32_t fmGetDistMethod(const SFunctionNode* pFunc, SFunctionNode** pPartialFunc, SFunctionNode** pMergeFunc);
...
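Note: the new fm* predicates let callers classify functions without hard-coded id lists. An illustrative check; the surrounding planner pass and the pFunc node are hypothetical:

if (fmIsLastRowFunc(pFunc->funcId)) {
  // candidate for the cached-last-row fast path (see FUNCTION_TYPE_CACHE_LAST_ROW above)
} else if (fmIsSystemInfoFunc(pFunc->funcId) || fmIsClientPseudoColumnFunc(pFunc->funcId)) {
  // can be evaluated on the client side without visiting a vnode
}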
This diff is collapsed.
...@@ -16,6 +16,13 @@ ...@@ -16,6 +16,13 @@
#ifndef TDENGINE_TUDF_H #ifndef TDENGINE_TUDF_H
#define TDENGINE_TUDF_H #define TDENGINE_TUDF_H
#undef malloc
#define malloc malloc
#undef free
#define free free
#undef realloc
#define alloc alloc
#include <taosudf.h>
#include <stdint.h> #include <stdint.h>
#include <stdbool.h> #include <stdbool.h>
...@@ -36,56 +43,6 @@ extern "C" { ...@@ -36,56 +43,6 @@ extern "C" {
#endif #endif
#define UDF_DNODE_ID_ENV_NAME "DNODE_ID" #define UDF_DNODE_ID_ENV_NAME "DNODE_ID"
//======================================================================================
//begin API to taosd and qworker
typedef struct SUdfColumnMeta {
int16_t type;
int32_t bytes;
uint8_t precision;
uint8_t scale;
} SUdfColumnMeta;
typedef struct SUdfColumnData {
int32_t numOfRows;
int32_t rowsAlloc;
union {
struct {
int32_t nullBitmapLen;
char *nullBitmap;
int32_t dataLen;
char *data;
} fixLenCol;
struct {
int32_t varOffsetsLen;
int32_t *varOffsets;
int32_t payloadLen;
char *payload;
int32_t payloadAllocLen;
} varLenCol;
};
} SUdfColumnData;
typedef struct SUdfColumn {
SUdfColumnMeta colMeta;
bool hasNull;
SUdfColumnData colData;
} SUdfColumn;
typedef struct SUdfDataBlock {
int32_t numOfRows;
int32_t numOfCols;
SUdfColumn **udfCols;
} SUdfDataBlock;
typedef struct SUdfInterBuf {
int32_t bufLen;
char* buf;
int8_t numOfResult; //zero or one
} SUdfInterBuf;
typedef void *UdfcFuncHandle;
//low level APIs //low level APIs
/** /**
...@@ -127,177 +84,6 @@ int32_t udfAggFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock* pBlock); ...@@ -127,177 +84,6 @@ int32_t udfAggFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock* pBlock);
int32_t callUdfScalarFunc(char *udfName, SScalarParam *input, int32_t numOfCols, SScalarParam *output); int32_t callUdfScalarFunc(char *udfName, SScalarParam *input, int32_t numOfCols, SScalarParam *output);
int32_t cleanUpUdfs(); int32_t cleanUpUdfs();
// end API to taosd and qworker
//=============================================================================================================================
// begin API to UDF writer.
// dynamic lib init and destroy
typedef int32_t (*TUdfInitFunc)();
typedef int32_t (*TUdfDestroyFunc)();
//TODO: add API to check function arguments type, number etc.
#define UDF_MEMORY_EXP_GROWTH 1.5
#define udfColDataIsNull_var(pColumn, row) ((pColumn->colData.varLenCol.varOffsets)[row] == -1)
#define udfColDataIsNull_f(pColumn, row) ((BMCharPos(pColumn->colData.fixLenCol.nullBitmap, row) & (1u << (7u - BitPos(row)))) == (1u << (7u - BitPos(row))))
#define udfColDataSetNull_f(pColumn, row) \
do { \
BMCharPos(pColumn->colData.fixLenCol.nullBitmap, row) |= (1u << (7u - BitPos(row))); \
} while (0)
#define udfColDataSetNotNull_f(pColumn, r_) \
do { \
BMCharPos(pColumn->colData.fixLenCol.nullBitmap, r_) &= ~(1u << (7u - BitPos(r_))); \
} while (0)
#define udfColDataSetNull_var(pColumn, row) ((pColumn->colData.varLenCol.varOffsets)[row] = -1)
static FORCE_INLINE char* udfColDataGetData(const SUdfColumn* pColumn, int32_t row) {
if (IS_VAR_DATA_TYPE(pColumn->colMeta.type)) {
return pColumn->colData.varLenCol.payload + pColumn->colData.varLenCol.varOffsets[row];
} else {
return pColumn->colData.fixLenCol.data + pColumn->colMeta.bytes * row;
}
}
static FORCE_INLINE bool udfColDataIsNull(const SUdfColumn* pColumn, int32_t row) {
if (IS_VAR_DATA_TYPE(pColumn->colMeta.type)) {
if (pColumn->colMeta.type == TSDB_DATA_TYPE_JSON) {
if (udfColDataIsNull_var(pColumn, row)) {
return true;
}
char* data = udfColDataGetData(pColumn, row);
return (*data == TSDB_DATA_TYPE_NULL);
} else {
return udfColDataIsNull_var(pColumn, row);
}
} else {
return udfColDataIsNull_f(pColumn, row);
}
}
static FORCE_INLINE int32_t udfColEnsureCapacity(SUdfColumn* pColumn, int32_t newCapacity) {
SUdfColumnMeta *meta = &pColumn->colMeta;
SUdfColumnData *data = &pColumn->colData;
if (newCapacity== 0 || newCapacity <= data->rowsAlloc) {
return TSDB_CODE_SUCCESS;
}
int allocCapacity = TMAX(data->rowsAlloc, 8);
while (allocCapacity < newCapacity) {
allocCapacity *= UDF_MEMORY_EXP_GROWTH;
}
if (IS_VAR_DATA_TYPE(meta->type)) {
char* tmp = taosMemoryRealloc(data->varLenCol.varOffsets, sizeof(int32_t) * allocCapacity);
if (tmp == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
data->varLenCol.varOffsets = (int32_t*)tmp;
data->varLenCol.varOffsetsLen = sizeof(int32_t) * allocCapacity;
// for payload, add data in udfColDataAppend
} else {
char* tmp = taosMemoryRealloc(data->fixLenCol.nullBitmap, BitmapLen(allocCapacity));
if (tmp == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
data->fixLenCol.nullBitmap = tmp;
data->fixLenCol.nullBitmapLen = BitmapLen(allocCapacity);
if (meta->type == TSDB_DATA_TYPE_NULL) {
return TSDB_CODE_SUCCESS;
}
tmp = taosMemoryRealloc(data->fixLenCol.data, allocCapacity* meta->bytes);
if (tmp == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
data->fixLenCol.data = tmp;
data->fixLenCol.dataLen = allocCapacity* meta->bytes;
}
data->rowsAlloc = allocCapacity;
return TSDB_CODE_SUCCESS;
}
static FORCE_INLINE void udfColDataSetNull(SUdfColumn* pColumn, int32_t row) {
udfColEnsureCapacity(pColumn, row+1);
if (IS_VAR_DATA_TYPE(pColumn->colMeta.type)) {
udfColDataSetNull_var(pColumn, row);
} else {
udfColDataSetNull_f(pColumn, row);
}
pColumn->hasNull = true;
}
static FORCE_INLINE int32_t udfColDataSet(SUdfColumn* pColumn, uint32_t currentRow, const char* pData, bool isNull) {
SUdfColumnMeta *meta = &pColumn->colMeta;
SUdfColumnData *data = &pColumn->colData;
udfColEnsureCapacity(pColumn, currentRow+1);
bool isVarCol = IS_VAR_DATA_TYPE(meta->type);
if (isNull) {
udfColDataSetNull(pColumn, currentRow);
} else {
if (!isVarCol) {
colDataSetNotNull_f(data->fixLenCol.nullBitmap, currentRow);
memcpy(data->fixLenCol.data + meta->bytes * currentRow, pData, meta->bytes);
} else {
int32_t dataLen = varDataTLen(pData);
if (meta->type == TSDB_DATA_TYPE_JSON) {
if (*pData == TSDB_DATA_TYPE_NULL) {
dataLen = 0;
} else if (*pData == TSDB_DATA_TYPE_NCHAR) {
dataLen = varDataTLen(pData + CHAR_BYTES);
} else if (*pData == TSDB_DATA_TYPE_BIGINT || *pData == TSDB_DATA_TYPE_DOUBLE) {
dataLen = LONG_BYTES;
} else if (*pData == TSDB_DATA_TYPE_BOOL) {
dataLen = CHAR_BYTES;
}
dataLen += CHAR_BYTES;
}
if (data->varLenCol.payloadAllocLen < data->varLenCol.payloadLen + dataLen) {
uint32_t newSize = data->varLenCol.payloadAllocLen;
if (newSize <= 1) {
newSize = 8;
}
while (newSize < data->varLenCol.payloadLen + dataLen) {
newSize = newSize * UDF_MEMORY_EXP_GROWTH;
}
char *buf = taosMemoryRealloc(data->varLenCol.payload, newSize);
if (buf == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
data->varLenCol.payload = buf;
data->varLenCol.payloadAllocLen = newSize;
}
uint32_t len = data->varLenCol.payloadLen;
data->varLenCol.varOffsets[currentRow] = len;
memcpy(data->varLenCol.payload + len, pData, dataLen);
data->varLenCol.payloadLen += dataLen;
}
}
data->numOfRows = TMAX(currentRow + 1, data->numOfRows);
return 0;
}
typedef int32_t (*TUdfScalarProcFunc)(SUdfDataBlock* block, SUdfColumn *resultCol);
typedef int32_t (*TUdfAggStartFunc)(SUdfInterBuf *buf);
typedef int32_t (*TUdfAggProcessFunc)(SUdfDataBlock* block, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf);
typedef int32_t (*TUdfAggFinishFunc)(SUdfInterBuf* buf, SUdfInterBuf *resultData);
// end API to UDF writer
//=======================================================================================================================
#ifdef __cplusplus #ifdef __cplusplus
} }
......
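Note: the UDF-writer API (SUdfColumn, udfColDataSet(), udfColEnsureCapacity(), the TUdf* typedefs) moves out of tudf.h into the public taosudf.h header, which tudf.h now includes. A minimal scalar UDF sketch against the relocated helpers; the function name "twice" and the int32-only input are assumptions for illustration:

#include <taosudf.h>

int32_t twice_init() { return 0; }
int32_t twice_destroy() { return 0; }

// Doubles every non-NULL int32 value of the first input column.
int32_t twice(SUdfDataBlock* block, SUdfColumn* resultCol) {
  SUdfColumn* col = block->udfCols[0];
  udfColEnsureCapacity(resultCol, block->numOfRows);
  for (int32_t i = 0; i < block->numOfRows; ++i) {
    if (udfColDataIsNull(col, i)) {
      udfColDataSetNull(resultCol, i);
      continue;
    }
    int32_t v = 2 * *(int32_t*)udfColDataGetData(col, i);
    udfColDataSet(resultCol, i, (char*)&v, false);
  }
  return 0;  // TSDB_CODE_SUCCESS
}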
This diff is collapsed. (repeated for 13 files)
@@ -32,12 +32,15 @@ typedef struct SUpdateInfo {
   int64_t interval;
   int64_t watermark;
   TSKEY   minTS;
+  SScalableBf* pCloseWinSBF;
 } SUpdateInfo;

 SUpdateInfo *updateInfoInitP(SInterval* pInterval, int64_t watermark);
 SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t watermark);
 bool updateInfoIsUpdated(SUpdateInfo *pInfo, tb_uid_t tableId, TSKEY ts);
 void updateInfoDestroy(SUpdateInfo *pInfo);
+void updateInfoAddCloseWindowSBF(SUpdateInfo *pInfo);
+void updateInfoDestoryColseWinSBF(SUpdateInfo *pInfo);

 #ifdef __cplusplus
 }
...
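Note: SUpdateInfo gains a scalable Bloom filter for closed windows (pCloseWinSBF) plus hooks to attach and drop it. A hedged round-trip sketch; the interval and watermark values, tableUid, and rowTs are arbitrary placeholders:

SUpdateInfo* pInfo = updateInfoInit(10 * 1000, TSDB_TIME_PRECISION_MILLI, 60 * 1000);
updateInfoAddCloseWindowSBF(pInfo);   // start tracking rows that land in closed windows

if (!updateInfoIsUpdated(pInfo, tableUid, rowTs)) {
  // timestamp not seen before for this table: treat the row as new data
}

updateInfoDestoryColseWinSBF(pInfo);  // drop the close-window filter when no longer needed
updateInfoDestroy(pInfo);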
This diff is collapsed. (repeated for 36 files)
File mode changed from 100644 to 100755
This diff is collapsed. (repeated for 368 files)