Commit e5113c58 authored by yihaoDeng

Merge branch 'develop' into test2

...@@ -4,3 +4,6 @@ ...@@ -4,3 +4,6 @@
[submodule "src/connector/grafanaplugin"] [submodule "src/connector/grafanaplugin"]
path = src/connector/grafanaplugin path = src/connector/grafanaplugin
url = https://github.com/taosdata/grafanaplugin url = https://github.com/taosdata/grafanaplugin
[submodule "src/connector/hivemq-tdengine-extension"]
path = src/connector/hivemq-tdengine-extension
url = https://github.com/huskar-t/hivemq-tdengine-extension.git
...@@ -27,6 +27,7 @@ pipeline { ...@@ -27,6 +27,7 @@ pipeline {
cd debug cd debug
cmake .. > /dev/null cmake .. > /dev/null
make > /dev/null make > /dev/null
make install > /dev/null
cd ${WKC}/tests cd ${WKC}/tests
#./test-all.sh smoke #./test-all.sh smoke
./test-all.sh pytest ./test-all.sh pytest
...@@ -79,7 +80,20 @@ pipeline { ...@@ -79,7 +80,20 @@ pipeline {
cmake .. > /dev/null cmake .. > /dev/null
make > /dev/null make > /dev/null
cd ${WKC}/tests/pytest cd ${WKC}/tests/pytest
./crash_gen.sh -a -p -t 4 -s 2000 '''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
./crash_gen.sh -a -p -t 4 -s 2000
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
./handle_crash_gen_val_log.sh
'''
}
sh '''
date date
cd ${WKC}/tests cd ${WKC}/tests
./test-all.sh b2 ./test-all.sh b2
...@@ -124,14 +138,33 @@ pipeline { ...@@ -124,14 +138,33 @@ pipeline {
sh''' sh'''
cd ${WORKSPACE} cd ${WORKSPACE}
git checkout develop git checkout develop
cd tests/gotest
bash batchtest.sh
cd ${WORKSPACE}/tests/examples/JDBC/JDBCDemo/
mvn clean package assembly:single >/dev/null
java -jar target/jdbcChecker-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
cd ${WORKSPACE}/tests/examples/python/PYTHONConnectorChecker
python3 PythonChecker.py
''' '''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WORKSPACE}/tests/gotest
bash batchtest.sh
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WORKSPACE}/tests/examples/python/PYTHONConnectorChecker
python3 PythonChecker.py
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WORKSPACE}/tests/examples/JDBC/JDBCDemo/
mvn clean package assembly:single >/dev/null
java -jar target/jdbcChecker-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${JENKINS_HOME}/workspace/C#NET/src/CheckC#
dotnet run
'''
}
} }
} }
...@@ -139,5 +172,82 @@ pipeline { ...@@ -139,5 +172,82 @@ pipeline {
} }
} }
post {
success {
emailext (
subject: "SUCCESSFUL: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
body: '''<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
<tr>
<td><br />
<b><font color="#0B610B"><font size="6">Build Information</font></font></b>
<hr size="2" width="100%" align="center" /></td>
</tr>
<tr>
<td>
<ul>
<div style="font-size:18px">
<li>Build name >> branch: ${PROJECT_NAME}</li>
<li>Build result: <span style="color:green"> Successful </span></li>
<li>Build number: ${BUILD_NUMBER}</li>
<li>Triggered by: ${CAUSE}</li>
<li>Change summary: ${CHANGES}</li>
<li>Build URL: <a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>Build log: <a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
<li>Change set: ${JELLY_SCRIPT}</li>
</div>
</ul>
</td>
</tr>
</table></font>
</body>
</html>''',
to: "yqliu@taosdata.com,pxiao@taosdata.com",
from: "support@taosdata.com"
)
}
failure {
emailext (
subject: "FAILED: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
body: '''<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
<tr>
<td><br />
<b><font color="#0B610B"><font size="6">Build Information</font></font></b>
<hr size="2" width="100%" align="center" /></td>
</tr>
<tr>
<td>
<ul>
<div style="font-size:18px">
<li>Build name >> branch: ${PROJECT_NAME}</li>
<li>Build result: <span style="color:red"> Failure </span></li>
<li>Build number: ${BUILD_NUMBER}</li>
<li>Triggered by: ${CAUSE}</li>
<li>Change summary: ${CHANGES}</li>
<li>Build URL: <a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>Build log: <a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
<li>Change set: ${JELLY_SCRIPT}</li>
</div>
</ul>
</td>
</tr>
</table></font>
</body>
</html>''',
to: "yqliu@taosdata.com,pxiao@taosdata.com",
from: "support@taosdata.com"
)
}
}
} }
\ No newline at end of file
...@@ -13,6 +13,7 @@ ELSEIF (TD_WINDOWS) ...@@ -13,6 +13,7 @@ ELSEIF (TD_WINDOWS)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/go DESTINATION connector) INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/go DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/nodejs DESTINATION connector) INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/nodejs DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/python DESTINATION connector) INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/python DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/C\# DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/tests/examples DESTINATION .) INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/tests/examples DESTINATION .)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/packaging/cfg DESTINATION .) INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/packaging/cfg DESTINATION .)
INSTALL(FILES ${TD_COMMUNITY_DIR}/src/inc/taos.h DESTINATION include) INSTALL(FILES ${TD_COMMUNITY_DIR}/src/inc/taos.h DESTINATION include)
......
...@@ -87,6 +87,7 @@ TDengine系统后台服务由taosd提供,可以在配置文件taos.cfg里修 ...@@ -87,6 +87,7 @@ TDengine系统后台服务由taosd提供,可以在配置文件taos.cfg里修
- httpPort: port used by the RESTful service; all HTTP requests (TCP) send their query/write requests to this interface.
- dataDir: data file directory; all data files are written to this directory. Default: /var/lib/taos.
- logDir: log file directory; client and server runtime log files are written to this directory. Default: /var/log/taos.
- tempDir: temporary file directory; temporary files from the client and server (mainly intermediate results kept during queries) are written to this directory. Default: /tmp/ on Linux; on Windows, the directory pointed to by the tmp or temp environment variable.
- arbitrator: end point of the arbitrator in the system. Default: empty.
- role: optional role of the dnode. 0-any: can serve as an mnode and can also be assigned vnodes; 1-mgmt: can only serve as an mnode and cannot be assigned vnodes; 2-dnode: cannot serve as an mnode and can only be assigned vnodes.
- debugFlag: runtime log switch. 131 (errors and warnings), 135 (errors, warnings, and debug logs), 143 (errors, warnings, debug, and trace logs). Default: 131 or 135 (different modules have different defaults).
......
...@@ -34,7 +34,8 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专 ...@@ -34,7 +34,8 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
- [SQL writing](https://www.taosdata.com/cn/documentation20/insert/#SQL写入): use the SQL insert command to write one or more records into one or more tables
- [Telegraf writing](https://www.taosdata.com/cn/documentation20/insert/#Telegraf直接写入): configure Telegraf to write collected data directly, without writing any code
- [Prometheus writing](https://www.taosdata.com/cn/documentation20/insert/#Prometheus直接写入): configure Prometheus to write data directly, without writing any code
- [EMQ X Broker](https://www.taosdata.com/cn/documentation20/insert/#EMQ-X-Broker直接写入): configure EMQ X to write MQTT data directly, without writing any code
- [HiveMQ Broker](https://www.taosdata.com/cn/documentation20/insert/#HiveMQ-Broker直接写入): use the HiveMQ Extension to write MQTT data directly, without writing any code
## [Efficient data query](https://www.taosdata.com/cn/documentation20/queries)
......
...@@ -10,7 +10,7 @@ TDengine的模块之一是时序数据库。但除此之外,为减少研发的 ...@@ -10,7 +10,7 @@ TDengine的模块之一是时序数据库。但除此之外,为减少研发的
* __Hardware or cloud service cost reduced to 1/5__: thanks to its superior performance, compute resources are less than 1/5 of a typical big-data solution; with columnar storage and advanced compression, storage usage is less than 1/10 of a general-purpose database.
* __Full-stack time-series data processing engine__: integrates database, message queue, caching, and stream computing in one, so applications no longer need to integrate Kafka/Redis/HBase/Spark/HDFS and the like, greatly reducing the complexity and cost of application development and maintenance.
* __Powerful analytics__: data from ten years ago or one second ago can be queried simply by specifying a time range. Data can be aggregated over the time axis or across multiple devices. Ad-hoc queries can be run at any time via Shell, Python, R, or Matlab.
* __Seamless integration with third-party tools__: integrates with Telegraf, Grafana, EMQ, Prometheus, Matlab, R and more without a single line of code; OPC, Hadoop, Spark and others will be supported later, and BI tools will also connect seamlessly. * __Seamless integration with third-party tools__: integrates with Telegraf, Grafana, EMQ, HiveMQ, Prometheus, Matlab, R and more without a single line of code; OPC, Hadoop, Spark and others will be supported later, and BI tools will also connect seamlessly.
* __Zero operations cost, zero learning cost__: installing a cluster is quick and simple, no sharding is required, and backup is real-time. SQL-like syntax, RESTful support, Python/Java/C/C++/C#/Go/Node.js support; similar to MySQL, so there is zero learning cost.
With TDengine, the total cost of ownership of a typical IoT, connected-vehicle, or industrial-internet big-data platform can be greatly reduced. Note, however, that because it takes full advantage of the characteristics of IoT time-series data, it is not suitable for general-purpose data such as web crawlers, Weibo, WeChat, e-commerce, ERP, or CRM.
......
...@@ -844,7 +844,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 ...@@ -844,7 +844,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
- **PERCENTILE** - **PERCENTILE**
```mysql ```mysql
SELECT PERCENTILE(field_name, P) FROM { tb_name | stb_name } [WHERE clause]; SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause];
``` ```
Description: returns the value at the given percentile of a column in a table.
Return data type: double-precision floating point (Double).
...@@ -1016,9 +1016,9 @@ SELECT AVG(current),MAX(current),LEASTSQUARES(current, start_val, step_val), PER ...@@ -1016,9 +1016,9 @@ SELECT AVG(current),MAX(current),LEASTSQUARES(current, start_val, step_val), PER
``` ```
## TAOS SQL boundary limits
- Maximum database name length is 33 - Maximum database name length is 32
- Maximum table name length is 193, and each row of data is at most 16k characters - Maximum table name length is 192, and each row of data is at most 16k characters
- Maximum column name length is 65; up to 1024 columns are allowed, at least 2 are required, and the first column must be a timestamp - Maximum column name length is 64; up to 1024 columns are allowed, at least 2 are required, and the first column must be a timestamp
- Up to 128 tags are allowed (0 is also allowed); the total tag length must not exceed 16k characters
- The maximum SQL statement length is 65480 characters; it can be changed via the system configuration parameter maxSQLLength, up to 1M
- There is no limit on the number of databases, super tables, or tables; they are bounded only by system resources
...@@ -35,7 +35,7 @@ TDengine相对于通用数据库,有超高的压缩比,在绝大多数场景 ...@@ -35,7 +35,7 @@ TDengine相对于通用数据库,有超高的压缩比,在绝大多数场景
Raw DataSize = numOfTables * rowSizePerTable * rowsPerTable Raw DataSize = numOfTables * rowSizePerTable * rowsPerTable
``` ```
Example: 10 million smart meters, each sampling once every 15 minutes with 128 bytes per sample; the raw data volume for one year is 10000000\*128\*24\*60/15*365 = 44851T. TDengine needs roughly 44851/5 = 8970T, i.e. 8.9P of space. Example: 10 million smart meters, each sampling once every 15 minutes with 128 bytes per sample; the raw data volume for one year is 10000000\*128\*24\*60/15*365 = 44.8512T. TDengine needs roughly 44.851/5 = 8.97024T of space.
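To make the arithmetic above easy to reproduce, here is a small sketch that is not part of this commit; it uses the 5x compression rule of thumb quoted above, and the decimal-terabyte conversion is an assumption of the sketch:

```python
# Rough storage estimate for the smart-meter example above (illustrative sketch only).
num_tables = 10_000_000          # 10 million smart meters
row_size = 128                   # bytes collected per sample
rows_per_day = 24 * 60 // 15     # one sample every 15 minutes -> 96 rows/day
days = 365

raw_bytes = num_tables * row_size * rows_per_day * days
raw_tb = raw_bytes / 1e12        # decimal terabytes, as used in the text
compressed_tb = raw_tb / 5       # assuming the ~5x compression cited above

print(f"raw: {raw_tb:.4f} TB, after compression: {compressed_tb:.5f} TB")
# -> raw: 44.8512 TB, after compression: 8.97024 TB
```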
Users can set the maximum retention period of data on disk with the keep parameter. To further reduce storage cost, TDengine also provides multi-tier storage: the coldest data can be kept on the cheapest storage media, with no change needed on the application side; only the read speed is lower.
...@@ -253,7 +253,7 @@ ALTER USER <user_name> PASS <'password'>; ...@@ -253,7 +253,7 @@ ALTER USER <user_name> PASS <'password'>;
Change a user's password. To prevent it from being converted to lowercase, the password must be enclosed in single quotes (half-width ASCII single quotes).
``` ```
ALTER USER <user_name> PRIVILEDGE <super|write|read>; ALTER USER <user_name> PRIVILEGE <super|write|read>;
``` ```
Change a user's privilege to super/write/read; single quotes are not needed.
......
...@@ -221,7 +221,7 @@ TDengine采用时间驱动缓存管理策略(First-In-First-Out,FIFO), ...@@ -221,7 +221,7 @@ TDengine采用时间驱动缓存管理策略(First-In-First-Out,FIFO),
TDengine provides millisecond-level data retrieval through its query functions. Keeping the most recently arrived data directly in the cache allows faster responses to queries on the most recent record or batch of records and improves the overall query response time of the database. In this sense, **with suitable configuration parameters TDengine can be used as a data cache without deploying Redis or any other additional caching system**, which effectively simplifies the system architecture and lowers operations cost. Note that after TDengine restarts the cache is emptied: previously cached data is written to disk in batches, and unlike a dedicated key-value caching system, TDengine does not reload previously cached data back into the cache.
Each vnode has its own independent memory, made up of multiple fixed-size memory blocks, and memory is completely isolated between vnodes. When data is written it is appended to memory sequentially, in a log-like fashion, and each vnode maintains its own skip list for fast lookup. When more than half of the memory blocks are full, flushing to disk starts and subsequent writes go to new memory blocks, so half of a vnode's memory blocks always hold the most recent data, serving the goals of caching and fast lookup. The number of memory blocks in a vnode is set by the configuration parameter blocks, and the size of each block by the configuration parameter cache. Each vnode has its own independent memory, made up of multiple fixed-size memory blocks, and memory is completely isolated between vnodes. When data is written it is appended to memory sequentially, in a log-like fashion, and each vnode maintains its own skip list for fast lookup. When more than one third of the memory blocks are full, flushing to disk starts and subsequent writes go to new memory blocks, so one third of a vnode's memory blocks always hold the most recent data, serving the goals of caching and fast lookup. The number of memory blocks in a vnode is set by the configuration parameter blocks, and the size of each block by the configuration parameter cache.
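As a rough illustration of the relationship just described, and not part of this commit, the write cache of one vnode is simply the block size (cache) times the block count (blocks); the default values used below are assumptions of this sketch:

```python
# Hypothetical sketch: per-vnode write cache sizing (values are assumptions, not from this commit).
cache_mb = 16      # size of one memory block (the 'cache' parameter), assumed default
blocks = 6         # number of memory blocks per vnode (the 'blocks' parameter), assumed default

vnode_cache_mb = cache_mb * blocks
# Flushing starts once more than one third of the blocks are full (per the updated text above).
flush_trigger_blocks = blocks / 3

print(f"per-vnode cache: {vnode_cache_mb} MB, flush starts after > {flush_trigger_blocks:.1f} full blocks")
```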
### Persistent storage
TDengine uses a data-driven approach to write cached data to disk for persistent storage. When the data cached in a vnode reaches a certain volume, TDengine starts a flush thread to write the cached data to persistent storage so that subsequent writes are not blocked. When flushing, TDengine opens a new database log file and deletes the old log file after the flush succeeds, preventing unbounded log growth.
......
...@@ -616,6 +616,43 @@ HTTP请求URL采用`sqlutc`时,返回结果集的时间戳将采用UTC时间 ...@@ -616,6 +616,43 @@ HTTP请求URL采用`sqlutc`时,返回结果集的时间戳将采用UTC时间
- httpEnableCompress: whether compression is supported; disabled by default. Currently TDengine only supports the gzip compression format.
- httpDebugFlag: log switch. 131: errors and alarms only; 135: debug information; 143: very detailed debug information. Default: 131.
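For reference, and not taken from this commit, a minimal way to exercise the RESTful interface described in this section might look like the sketch below; it assumes the service listens on the default httpPort 6041, the default root/taosdata credentials, and the standard `/rest/sql` endpoint:

```python
# Hypothetical sketch of a RESTful query against taosd's HTTP service (assumptions noted above).
import base64
import json
import urllib.request

host, port = "127.0.0.1", 6041                       # assumed httpPort
token = base64.b64encode(b"root:taosdata").decode()  # assumed default credentials

req = urllib.request.Request(
    url=f"http://{host}:{port}/rest/sql",            # use /rest/sqlutc for UTC timestamps
    data=b"show databases;",                         # SQL is sent as the request body
    headers={"Authorization": f"Basic {token}"},
)
with urllib.request.urlopen(req) as resp:
    print(json.loads(resp.read()))                   # JSON result set: status, head, data, rows
```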
## CSharp Connector
On Windows, C# applications can use TDengine's native C interface to perform all database operations; a later version will provide an ORM (Dapper) framework driver.
#### Install the TDengine client
The C# connector requires `libtaos.so` and `taos.h`. Therefore, before using the C# connector, install the TDengine Windows client in the Windows environment where the program runs in order to obtain the required driver files.
After installation, you will see two files in the folder `C:/TDengine/examples/C#`:
- TDengineDriver.cs: invokes the native C methods in the taos.dll file
- TDengineTest.cs: reference example program
In the folder `C:\Windows\System32` you will see the `taos.dll` file.
#### Usage
- Add the C# interface file TDengineDriver.cs to the .NET project of your application
- Refer to TDengineTest.cs to define database connection parameters and to see how to insert, query, and perform other operations
- Because the C# interface depends on the `taos.dll` file, you can add `taos.dll` to the .NET solution
#### Notes
- `taos.dll` is built for the x64 platform, so when the .NET project builds the .exe file, set the "Platform" of both the solution and the project to "x64".
- This .NET interface has been verified with Visual Studio 2013/2015/2017; other VS versions have not been verified yet.
#### Third-party driver
Maikebing.Data.Taos is an ADO.NET provider for TDengine that supports Linux and Windows. The package is contributed by `麦壳饼@@maikebing`; for details, see:
```
// package download
https://github.com/maikebing/Maikebing.EntityFrameworkCore.Taos
// usage guide
https://www.taosdata.com/blog/2020/11/02/1901.html
```
## Go Connector ## Go Connector
......
...@@ -38,9 +38,9 @@ ...@@ -38,9 +38,9 @@
6. Check the firewall settings and confirm that TCP/UDP ports 6030-6042 are open
7. For JDBC connections (ODBC, Python, Go, etc. are similar) on Linux, make sure *libtaos.so* is in the directory */usr/local/lib/taos* and that */usr/local/lib/taos* is in the system library search path *LD_LIBRARY_PATH* 7. For JDBC connections (ODBC, Python, Go, etc. are similar) on Linux, make sure *libtaos.so* is in the directory */usr/local/taos/driver* and that */usr/local/taos/driver* is in the system library search path *LD_LIBRARY_PATH*
8. For JDBC, ODBC, Python, Go, etc. connections on Windows, make sure *driver/c/taos.dll* is in your system search path (placing *taos.dll* in *C:\Windows\System32* is recommended) 8. For JDBC, ODBC, Python, Go, etc. connections on Windows, make sure *C:\TDengine\driver\taos.dll* is in your system library search path (placing *taos.dll* in *C:\Windows\System32* is recommended)
9. If the connection problem still cannot be resolved, use the command-line tool nc to check whether the TCP and UDP connections on the given port work
Check whether the UDP port connection works: `nc -vuz {hostIP} {port}`
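If nc is not available, a rough equivalent check can be scripted; the sketch below is only an illustration and not part of the documentation above (since UDP is connectionless, the absence of an error merely suggests, rather than proves, that the port is open):

```python
# Hypothetical sketch: probe TCP/UDP reachability of a TDengine port (e.g. 6030-6042).
import socket

def check_tcp(host: str, port: int, timeout: float = 2.0) -> bool:
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:
        return False

def probe_udp(host: str, port: int) -> None:
    # UDP has no handshake; this only verifies a datagram can be sent.
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        s.sendto(b"ping", (host, port))

print("tcp 6030 reachable:", check_tcp("127.0.0.1", 6030))
probe_udp("127.0.0.1", 6030)
```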
......
# Efficient data writing
TDengine supports writing data through multiple interfaces, including SQL, Prometheus, Telegraf, EMQ MQTT Broker, CSV files, etc., and interfaces such as Kafka and OPC will be provided later. Data can be inserted one record at a time or in batches, for a single data collection point or for many data collection points at once. Multi-threaded insertion, insertion of data that is out of order in time, and insertion of historical data are all supported. TDengine supports writing data through multiple interfaces, including SQL, Prometheus, Telegraf, EMQ MQTT Broker, HiveMQ Broker, CSV files, etc., and interfaces such as Kafka and OPC will be provided later. Data can be inserted one record at a time or in batches, for a single data collection point or for many data collection points at once. Multi-threaded insertion, insertion of data that is out of order in time, and insertion of historical data are all supported.
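As a concrete illustration of the batch-insert capability just described, a client can put several records, even for several tables, into a single INSERT statement. The sketch below is not part of this commit; it assumes the Python connector shipped under src/connector/python and a locally running server, and the database and table names are invented:

```python
# Hypothetical sketch: batch insert through the TDengine Python connector
# (database/table names and values are invented and assumed to exist already).
import taos

conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
cursor = conn.cursor()
cursor.execute("USE demo")  # assumes database 'demo' with tables d1001/d1002 already created

# A single INSERT statement may carry several records and even several tables.
cursor.execute(
    "INSERT INTO d1001 VALUES ('2020-11-20 10:00:00.000', 10.3) ('2020-11-20 10:00:01.000', 10.5) "
    "d1002 VALUES ('2020-11-20 10:00:00.000', 11.2)"
)

cursor.close()
conn.close()
```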
## SQL writing
...@@ -218,7 +218,15 @@ use telegraf; ...@@ -218,7 +218,15 @@ use telegraf;
select * from cpu; select * from cpu;
``` ```
## Direct writing via EMQ X Broker
MQTT is a popular IoT data transfer protocol, and [EMQ](https://github.com/emqx/emqx) is an open-source MQTT broker; without writing any code, a simple "rule" configured in the EMQ Dashboard is enough to write MQTT data directly into TDengine. EMQ X supports saving data to TDengine by sending it to a web service, and its enterprise edition also provides a native TDengine driver for direct saving. For details, see the [EMQ official documentation](https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine)
MQTT is a popular IoT data transfer protocol; TDengine can easily ingest the data received by an MQTT broker and write it into TDengine.
## Direct writing via EMQ Broker
[EMQ](https://github.com/emqx/emqx) is an open-source MQTT broker; without writing any code, a simple "rule" configured in the EMQ Dashboard is enough to write MQTT data directly into TDengine. EMQ X supports saving data to TDengine by sending it to a web service, and its enterprise edition also provides a native TDengine driver for direct saving. For details, see the [EMQ official documentation](https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine)
## Direct writing via HiveMQ Broker
[HiveMQ](https://www.hivemq.com/) is an MQTT broker that offers a free personal edition and an enterprise edition, targeted at enterprise and emerging machine-to-machine (M2M) communication and internal transport, with scalability, ease of management, and security. HiveMQ provides an open-source extension SDK, and data can be saved to TDengine through the HiveMQ extension - TDengine. For details, see the [HiveMQ extension - TDengine documentation](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README.md)
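To complete the picture, a device-side publisher whose messages such a broker could forward into TDengine might look like the following sketch; it is not part of this commit, and the paho-mqtt client, broker address, topic, and payload layout are all assumptions (the broker-side rule or extension must be configured as described in the linked documentation):

```python
# Hypothetical sketch: a device publishing readings over MQTT for a broker-side rule/extension
# to forward into TDengine (library, broker address, topic, and payload format are assumptions).
import json
import time
import paho.mqtt.client as mqtt

client = mqtt.Client()
client.connect("127.0.0.1", 1883)  # assumed broker address and default MQTT port

payload = {"ts": int(time.time() * 1000), "device": "d1001", "current": 10.3}
client.publish("sensors/d1001", json.dumps(payload), qos=1)

client.disconnect()
```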
...@@ -20,6 +20,9 @@ ...@@ -20,6 +20,9 @@
# data file's directory # data file's directory
# dataDir /var/lib/taos # dataDir /var/lib/taos
# temporary file's directory
# tempDir /tmp/
# the arbitrator's fully qualified domain name (FQDN) for TDengine system, for cluster only # the arbitrator's fully qualified domain name (FQDN) for TDengine system, for cluster only
# arbitrator arbitrator_hostname:6042 # arbitrator arbitrator_hostname:6042
...@@ -256,3 +259,5 @@ ...@@ -256,3 +259,5 @@
# maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden # maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden
# maxBinaryDisplayWidth 30 # maxBinaryDisplayWidth 30
# enable/disable telemetry reporting
# telemetryReporting 1
\ No newline at end of file
...@@ -48,6 +48,7 @@ cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_pat ...@@ -48,6 +48,7 @@ cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_pat
cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/tools/preun.sh ${pkg_dir}${install_home_path}/script cp ${compile_dir}/../packaging/tools/preun.sh ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/build/bin/taosdemo ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/taosdemo ${pkg_dir}${install_home_path}/bin
#cp ${compile_dir}/build/bin/taosdump ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver
...@@ -58,7 +59,7 @@ cp -r ${top_dir}/src/connector/grafanaplugin ${pkg_dir}${install_home_pat ...@@ -58,7 +59,7 @@ cp -r ${top_dir}/src/connector/grafanaplugin ${pkg_dir}${install_home_pat
cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector
cp ${compile_dir}/build/lib/taos-jdbcdriver*dist.* ${pkg_dir}${install_home_path}/connector cp ${compile_dir}/build/lib/taos-jdbcdriver*dist.* ${pkg_dir}${install_home_path}/connector ||:
cp -r ${compile_dir}/../packaging/deb/DEBIAN ${pkg_dir}/ cp -r ${compile_dir}/../packaging/deb/DEBIAN ${pkg_dir}/
chmod 755 ${pkg_dir}/DEBIAN/* chmod 755 ${pkg_dir}/DEBIAN/*
......
...@@ -156,9 +156,15 @@ build_time=$(date +"%F %R") ...@@ -156,9 +156,15 @@ build_time=$(date +"%F %R")
# get commit id from git # get commit id from git
gitinfo=$(git rev-parse --verify HEAD) gitinfo=$(git rev-parse --verify HEAD)
enterprise_dir="${top_dir}/../enterprise"
cd ${enterprise_dir} if [[ "$verMode" == "cluster" ]]; then
gitinfoOfInternal=$(git rev-parse --verify HEAD) enterprise_dir="${top_dir}/../enterprise"
cd ${enterprise_dir}
gitinfoOfInternal=$(git rev-parse --verify HEAD)
else
gitinfoOfInternal=NULL
fi
cd ${curr_dir} cd ${curr_dir}
# 2. cmake executable file # 2. cmake executable file
...@@ -193,23 +199,35 @@ cd ${curr_dir} ...@@ -193,23 +199,35 @@ cd ${curr_dir}
# 3. Call the corresponding script for packaging # 3. Call the corresponding script for packaging
if [ "$osType" != "Darwin" ]; then if [ "$osType" != "Darwin" ]; then
if [[ "$verMode" != "cluster" ]] && [[ "$cpuType" == "x64" ]] && [[ "$dbName" == "taos" ]]; then if [[ "$verMode" != "cluster" ]] && [[ "$cpuType" == "x64" ]] && [[ "$dbName" == "taos" ]]; then
echo "====do deb package for the ubuntu system====" ret='0'
output_dir="${top_dir}/debs" command -v dpkg >/dev/null 2>&1 || { ret='1'; }
if [ -d ${output_dir} ]; then if [ "$ret" -eq 0 ]; then
${csudo} rm -rf ${output_dir} echo "====do deb package for the ubuntu system===="
output_dir="${top_dir}/debs"
if [ -d ${output_dir} ]; then
${csudo} rm -rf ${output_dir}
fi
${csudo} mkdir -p ${output_dir}
cd ${script_dir}/deb
${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
else
echo "==========dpkg command not exist, so not release deb package!!!"
fi fi
${csudo} mkdir -p ${output_dir}
cd ${script_dir}/deb ret='0'
${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType} command -v rpmbuild >/dev/null 2>&1 || { ret='1'; }
if [ "$ret" -eq 0 ]; then
echo "====do rpm package for the centos system====" echo "====do rpm package for the centos system===="
output_dir="${top_dir}/rpms" output_dir="${top_dir}/rpms"
if [ -d ${output_dir} ]; then if [ -d ${output_dir} ]; then
${csudo} rm -rf ${output_dir} ${csudo} rm -rf ${output_dir}
fi
${csudo} mkdir -p ${output_dir}
cd ${script_dir}/rpm
${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
else
echo "==========rpmbuild command not exist, so not release rpm package!!!"
fi fi
${csudo} mkdir -p ${output_dir}
cd ${script_dir}/rpm
${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
fi fi
echo "====do tar.gz package for all systems====" echo "====do tar.gz package for all systems===="
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
%define cfg_install_dir /etc/taos %define cfg_install_dir /etc/taos
%define __strip /bin/true %define __strip /bin/true
Name: TDengine Name: tdengine
Version: %{_version} Version: %{_version}
Release: 3%{?dist} Release: 3%{?dist}
Summary: tdengine from taosdata Summary: tdengine from taosdata
...@@ -58,6 +58,7 @@ cp %{_compiledir}/../packaging/tools/preun.sh %{buildroot}%{homepath}/scri ...@@ -58,6 +58,7 @@ cp %{_compiledir}/../packaging/tools/preun.sh %{buildroot}%{homepath}/scri
cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosdemo %{buildroot}%{homepath}/bin cp %{_compiledir}/build/bin/taosdemo %{buildroot}%{homepath}/bin
#cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include
...@@ -65,7 +66,7 @@ cp -r %{_compiledir}/../src/connector/grafanaplugin %{buildroot}%{homepath}/conn ...@@ -65,7 +66,7 @@ cp -r %{_compiledir}/../src/connector/grafanaplugin %{buildroot}%{homepath}/conn
cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector
cp %{_compiledir}/build/lib/taos-jdbcdriver*dist.* %{buildroot}%{homepath}/connector cp %{_compiledir}/build/lib/taos-jdbcdriver*dist.* %{buildroot}%{homepath}/connector ||:
cp -r %{_compiledir}/../tests/examples/* %{buildroot}%{homepath}/examples cp -r %{_compiledir}/../tests/examples/* %{buildroot}%{homepath}/examples
#Scripts executed before installation #Scripts executed before installation
...@@ -134,6 +135,7 @@ if [ $1 -eq 0 ];then ...@@ -134,6 +135,7 @@ if [ $1 -eq 0 ];then
${csudo} rm -f ${bin_link_dir}/taos || : ${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || : ${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosdemo || : ${csudo} rm -f ${bin_link_dir}/taosdemo || :
#${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${cfg_link_dir}/* || : ${csudo} rm -f ${cfg_link_dir}/* || :
${csudo} rm -f ${inc_link_dir}/taos.h || : ${csudo} rm -f ${inc_link_dir}/taos.h || :
${csudo} rm -f ${inc_link_dir}/taoserror.h || : ${csudo} rm -f ${inc_link_dir}/taoserror.h || :
......
...@@ -278,11 +278,11 @@ function install_service_on_sysvinit() { ...@@ -278,11 +278,11 @@ function install_service_on_sysvinit() {
# Install taosd service # Install taosd service
if ((${os_type}==1)); then if ((${os_type}==1)); then
${csudo} cp -f ${script_dir}/../deb/init.d/taosd ${install_main_dir}/init.d ${csudo} cp -f ${script_dir}/../deb/taosd ${install_main_dir}/init.d
${csudo} cp ${script_dir}/../deb/init.d/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd ${csudo} cp ${script_dir}/../deb/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd
elif ((${os_type}==2)); then elif ((${os_type}==2)); then
${csudo} cp -f ${script_dir}/../rpm/init.d/taosd ${install_main_dir}/init.d ${csudo} cp -f ${script_dir}/../rpm/taosd ${install_main_dir}/init.d
${csudo} cp ${script_dir}/../rpm/init.d/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd ${csudo} cp ${script_dir}/../rpm/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd
fi fi
#restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
......
...@@ -45,6 +45,7 @@ if [ "$osType" != "Darwin" ]; then ...@@ -45,6 +45,7 @@ if [ "$osType" != "Darwin" ]; then
strip ${build_dir}/bin/taos strip ${build_dir}/bin/taos
bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh" bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh"
else else
#bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${script_dir}/remove_client.sh ${script_dir}/set_core.sh"
bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${script_dir}/remove_client.sh ${script_dir}/set_core.sh" bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${script_dir}/remove_client.sh ${script_dir}/set_core.sh"
fi fi
lib_files="${build_dir}/lib/libtaos.so.${version}" lib_files="${build_dir}/lib/libtaos.so.${version}"
...@@ -110,7 +111,7 @@ mkdir -p ${install_dir}/connector ...@@ -110,7 +111,7 @@ mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi fi
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/ cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
cp -r ${connector_dir}/python ${install_dir}/connector/ cp -r ${connector_dir}/python ${install_dir}/connector/
......
...@@ -135,7 +135,7 @@ mkdir -p ${install_dir}/connector ...@@ -135,7 +135,7 @@ mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi fi
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/ cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
cp -r ${connector_dir}/python ${install_dir}/connector/ cp -r ${connector_dir}/python ${install_dir}/connector/
......
...@@ -36,6 +36,7 @@ if [ "$pagMode" == "lite" ]; then ...@@ -36,6 +36,7 @@ if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/taos strip ${build_dir}/bin/taos
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh" bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh"
else else
#bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove.sh ${script_dir}/set_core.sh"
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove.sh ${script_dir}/set_core.sh" bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove.sh ${script_dir}/set_core.sh"
fi fi
...@@ -124,7 +125,7 @@ cp ${lib_files} ${install_dir}/driver ...@@ -124,7 +125,7 @@ cp ${lib_files} ${install_dir}/driver
connector_dir="${code_dir}/connector" connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/ cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
cp -r ${connector_dir}/python ${install_dir}/connector/ cp -r ${connector_dir}/python ${install_dir}/connector/
cp -r ${connector_dir}/go ${install_dir}/connector cp -r ${connector_dir}/go ${install_dir}/connector
......
...@@ -156,7 +156,7 @@ cp ${lib_files} ${install_dir}/driver ...@@ -156,7 +156,7 @@ cp ${lib_files} ${install_dir}/driver
connector_dir="${code_dir}/connector" connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/ cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
cp -r ${connector_dir}/python ${install_dir}/connector/ cp -r ${connector_dir}/python ${install_dir}/connector/
cp -r ${connector_dir}/go ${install_dir}/connector cp -r ${connector_dir}/go ${install_dir}/connector
......
...@@ -81,8 +81,10 @@ function install_lib() { ...@@ -81,8 +81,10 @@ function install_lib() {
${csudo} ln -s ${lib_dir}/libtaos.* ${lib_link_dir}/libtaos.so.1 ${csudo} ln -s ${lib_dir}/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
${csudo} ln -s ${lib_dir}/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : ${csudo} ln -s ${lib_dir}/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
fi
} }
function install_bin() { function install_bin() {
...@@ -121,8 +123,11 @@ function install_config() { ...@@ -121,8 +123,11 @@ function install_config() {
echo -e -n "${GREEN}Enter FQDN:port (like h1.taosdata.com:6030) of an existing TDengine cluster node to join${NC}" echo -e -n "${GREEN}Enter FQDN:port (like h1.taosdata.com:6030) of an existing TDengine cluster node to join${NC}"
echo echo
echo -e -n "${GREEN}OR leave it blank to build one${NC}:" echo -e -n "${GREEN}OR leave it blank to build one${NC}:"
read firstEp #read firstEp
while true; do if exec < /dev/tty; then
read firstEp;
fi
while true; do
if [ ! -z "$firstEp" ]; then if [ ! -z "$firstEp" ]; then
# check the format of the firstEp # check the format of the firstEp
#if [[ $firstEp == $FQDN_PATTERN ]]; then #if [[ $firstEp == $FQDN_PATTERN ]]; then
......
...@@ -490,7 +490,7 @@ static bool balanceMontiorDropping() { ...@@ -490,7 +490,7 @@ static bool balanceMontiorDropping() {
if (pDnode->status == TAOS_DN_STATUS_OFFLINE) { if (pDnode->status == TAOS_DN_STATUS_OFFLINE) {
if (pDnode->lastAccess + tsOfflineThreshold > tsAccessSquence) continue; if (pDnode->lastAccess + tsOfflineThreshold > tsAccessSquence) continue;
if (strcmp(pDnode->dnodeEp, dnodeGetMnodeMasterEp()) == 0) continue; if (dnodeIsMasterEp(pDnode->dnodeEp)) continue;
if (mnodeGetDnodesNum() <= 1) continue; if (mnodeGetDnodesNum() <= 1) continue;
mLInfo("dnode:%d, set to removing state for it offline:%d seconds", pDnode->dnodeId, mLInfo("dnode:%d, set to removing state for it offline:%d seconds", pDnode->dnodeId,
...@@ -571,8 +571,8 @@ static void balanceCheckDnodeAccess() { ...@@ -571,8 +571,8 @@ static void balanceCheckDnodeAccess() {
if (pDnode->status != TAOS_DN_STATUS_DROPPING && pDnode->status != TAOS_DN_STATUS_OFFLINE) { if (pDnode->status != TAOS_DN_STATUS_DROPPING && pDnode->status != TAOS_DN_STATUS_OFFLINE) {
pDnode->status = TAOS_DN_STATUS_OFFLINE; pDnode->status = TAOS_DN_STATUS_OFFLINE;
pDnode->offlineReason = TAOS_DN_OFF_STATUS_MSG_TIMEOUT; pDnode->offlineReason = TAOS_DN_OFF_STATUS_MSG_TIMEOUT;
mInfo("dnode:%d, set to offline state, access seq:%d, last seq:%d", pDnode->dnodeId, tsAccessSquence, mInfo("dnode:%d, set to offline state, access seq:%d last seq:%d laststat:%d", pDnode->dnodeId, tsAccessSquence,
pDnode->lastAccess); pDnode->lastAccess, pDnode->status);
balanceSetVgroupOffline(pDnode); balanceSetVgroupOffline(pDnode);
} }
} }
......
...@@ -64,9 +64,8 @@ typedef struct SLocalReducer { ...@@ -64,9 +64,8 @@ typedef struct SLocalReducer {
SColumnModel * resColModel; SColumnModel * resColModel;
tExtMemBuffer ** pExtMemBuffer; // disk-based buffer tExtMemBuffer ** pExtMemBuffer; // disk-based buffer
SFillInfo* pFillInfo; // interpolation support structure SFillInfo* pFillInfo; // interpolation support structure
char * pFinalRes; // result data after interpo char* pFinalRes; // result data after interpo
tFilePage * discardData; tFilePage* discardData;
SResultInfo * pResInfo;
bool discard; bool discard;
int32_t offset; // limit offset value int32_t offset; // limit offset value
bool orderPrjOnSTable; // projection query on stable bool orderPrjOnSTable; // projection query on stable
......
...@@ -23,7 +23,7 @@ extern "C" { ...@@ -23,7 +23,7 @@ extern "C" {
#include "tscUtil.h" #include "tscUtil.h"
#include "tsclient.h" #include "tsclient.h"
void tscFetchDatablockFromSubquery(SSqlObj* pSql); void tscFetchDatablockForSubquery(SSqlObj* pSql);
void tscSetupOutputColumnIndex(SSqlObj* pSql); void tscSetupOutputColumnIndex(SSqlObj* pSql);
void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code); void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code);
......
...@@ -75,6 +75,7 @@ typedef struct SJoinSupporter { ...@@ -75,6 +75,7 @@ typedef struct SJoinSupporter {
SArray* exprList; SArray* exprList;
SFieldInfo fieldsInfo; SFieldInfo fieldsInfo;
STagCond tagCond; STagCond tagCond;
SSqlGroupbyExpr groupInfo; // group by info
struct STSBuf* pTSBuf; // the TSBuf struct that holds the compressed timestamp array struct STSBuf* pTSBuf; // the TSBuf struct that holds the compressed timestamp array
FILE* f; // temporary file in order to create TSBuf FILE* f; // temporary file in order to create TSBuf
char path[PATH_MAX]; // temporary file path, todo dynamic allocate memory char path[PATH_MAX]; // temporary file path, todo dynamic allocate memory
...@@ -82,11 +83,12 @@ typedef struct SJoinSupporter { ...@@ -82,11 +83,12 @@ typedef struct SJoinSupporter {
char* pIdTagList; // result of first stage tags char* pIdTagList; // result of first stage tags
int32_t totalLen; int32_t totalLen;
int32_t num; int32_t num;
SArray* pVgroupTables;
} SJoinSupporter; } SJoinSupporter;
typedef struct SVgroupTableInfo { typedef struct SVgroupTableInfo {
SCMVgroupInfo vgInfo; SVgroupInfo vgInfo;
SArray* itemList; //SArray<STableIdInfo> SArray* itemList; //SArray<STableIdInfo>
} SVgroupTableInfo; } SVgroupTableInfo;
static FORCE_INLINE SQueryInfo* tscGetQueryInfoDetail(SSqlCmd* pCmd, int32_t subClauseIndex) { static FORCE_INLINE SQueryInfo* tscGetQueryInfoDetail(SSqlCmd* pCmd, int32_t subClauseIndex) {
...@@ -215,7 +217,7 @@ SQueryInfo *tscGetQueryInfoDetailSafely(SSqlCmd *pCmd, int32_t subClauseIndex); ...@@ -215,7 +217,7 @@ SQueryInfo *tscGetQueryInfoDetailSafely(SSqlCmd *pCmd, int32_t subClauseIndex);
void tscClearTableMetaInfo(STableMetaInfo* pTableMetaInfo, bool removeFromCache); void tscClearTableMetaInfo(STableMetaInfo* pTableMetaInfo, bool removeFromCache);
STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, STableMeta* pTableMeta, STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, STableMeta* pTableMeta,
SVgroupsInfo* vgroupList, SArray* pTagCols); SVgroupsInfo* vgroupList, SArray* pTagCols, SArray* pVgroupTables);
STableMetaInfo* tscAddEmptyMetaInfo(SQueryInfo *pQueryInfo); STableMetaInfo* tscAddEmptyMetaInfo(SQueryInfo *pQueryInfo);
int32_t tscAddSubqueryInfo(SSqlCmd *pCmd); int32_t tscAddSubqueryInfo(SSqlCmd *pCmd);
...@@ -224,6 +226,9 @@ void tscInitQueryInfo(SQueryInfo* pQueryInfo); ...@@ -224,6 +226,9 @@ void tscInitQueryInfo(SQueryInfo* pQueryInfo);
void tscClearSubqueryInfo(SSqlCmd* pCmd); void tscClearSubqueryInfo(SSqlCmd* pCmd);
void tscFreeVgroupTableInfo(SArray* pVgroupTables); void tscFreeVgroupTableInfo(SArray* pVgroupTables);
SArray* tscVgroupTableInfoClone(SArray* pVgroupTables);
void tscRemoveVgroupTableGroup(SArray* pVgroupTable, int32_t index);
void tscVgroupTableCopy(SVgroupTableInfo* info, SVgroupTableInfo* pInfo);
int tscGetSTableVgroupInfo(SSqlObj* pSql, int32_t clauseIndex); int tscGetSTableVgroupInfo(SSqlObj* pSql, int32_t clauseIndex);
int tscGetTableMeta(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo); int tscGetTableMeta(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo);
...@@ -234,7 +239,7 @@ void tscDoQuery(SSqlObj* pSql); ...@@ -234,7 +239,7 @@ void tscDoQuery(SSqlObj* pSql);
SVgroupsInfo* tscVgroupInfoClone(SVgroupsInfo *pInfo); SVgroupsInfo* tscVgroupInfoClone(SVgroupsInfo *pInfo);
void* tscVgroupInfoClear(SVgroupsInfo *pInfo); void* tscVgroupInfoClear(SVgroupsInfo *pInfo);
void tscSCMVgroupInfoCopy(SCMVgroupInfo* dst, const SCMVgroupInfo* src); void tscSVgroupInfoCopy(SVgroupInfo* dst, const SVgroupInfo* src);
/** /**
* The create object function must be successful expect for the out of memory issue. * The create object function must be successful expect for the out of memory issue.
* *
...@@ -262,6 +267,7 @@ void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t sub ...@@ -262,6 +267,7 @@ void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t sub
void doAddGroupColumnForSubquery(SQueryInfo* pQueryInfo, int32_t tagIndex); void doAddGroupColumnForSubquery(SQueryInfo* pQueryInfo, int32_t tagIndex);
int16_t tscGetJoinTagColIdByUid(STagCond* pTagCond, uint64_t uid); int16_t tscGetJoinTagColIdByUid(STagCond* pTagCond, uint64_t uid);
int16_t tscGetTagColIndexById(STableMeta* pTableMeta, int16_t colId);
void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex); void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex);
......
...@@ -77,7 +77,7 @@ SSchema *tscGetTableColumnSchema(const STableMeta *pMeta, int32_t colIndex); ...@@ -77,7 +77,7 @@ SSchema *tscGetTableColumnSchema(const STableMeta *pMeta, int32_t colIndex);
* @param colId * @param colId
* @return * @return
*/ */
SSchema* tscGetTableColumnSchemaById(STableMeta* pTableMeta, int16_t colId); SSchema* tscGetColumnSchemaById(STableMeta* pTableMeta, int16_t colId);
/** /**
* check if the schema is valid or not, including following aspects: * check if the schema is valid or not, including following aspects:
...@@ -107,9 +107,6 @@ SSchema tscGetTbnameColumnSchema(); ...@@ -107,9 +107,6 @@ SSchema tscGetTbnameColumnSchema();
*/ */
STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size); STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size);
//todo tags value as well as the table id structure needs refactor
char *tsGetTagsValue(STableMeta *pMeta);
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif
......
...@@ -30,6 +30,7 @@ extern "C" { ...@@ -30,6 +30,7 @@ extern "C" {
#include "tsqlfunction.h" #include "tsqlfunction.h"
#include "tutil.h" #include "tutil.h"
#include "tcache.h" #include "tcache.h"
#include "tref.h"
#include "qExecutor.h" #include "qExecutor.h"
#include "qSqlparser.h" #include "qSqlparser.h"
...@@ -89,12 +90,12 @@ typedef struct STableComInfo { ...@@ -89,12 +90,12 @@ typedef struct STableComInfo {
int32_t rowSize; int32_t rowSize;
} STableComInfo; } STableComInfo;
typedef struct SCMCorVgroupInfo { typedef struct SCorVgroupInfo {
int32_t version; int32_t version;
int8_t inUse; int8_t inUse;
int8_t numOfEps; int8_t numOfEps;
SEpAddr1 epAddr[TSDB_MAX_REPLICA]; SEpAddr1 epAddr[TSDB_MAX_REPLICA];
} SCMCorVgroupInfo; } SCorVgroupInfo;
typedef struct STableMeta { typedef struct STableMeta {
STableComInfo tableInfo; STableComInfo tableInfo;
...@@ -102,8 +103,8 @@ typedef struct STableMeta { ...@@ -102,8 +103,8 @@ typedef struct STableMeta {
int16_t sversion; int16_t sversion;
int16_t tversion; int16_t tversion;
char sTableId[TSDB_TABLE_FNAME_LEN]; char sTableId[TSDB_TABLE_FNAME_LEN];
SCMVgroupInfo vgroupInfo; SVgroupInfo vgroupInfo;
SCMCorVgroupInfo corVgroupInfo; SCorVgroupInfo corVgroupInfo;
STableId id; STableId id;
SSchema schema[]; // if the table is TSDB_CHILD_TABLE, schema is acquired by super table meta info SSchema schema[]; // if the table is TSDB_CHILD_TABLE, schema is acquired by super table meta info
} STableMeta; } STableMeta;
...@@ -127,7 +128,7 @@ typedef struct STableMetaInfo { ...@@ -127,7 +128,7 @@ typedef struct STableMetaInfo {
typedef struct SSqlExpr { typedef struct SSqlExpr {
char aliasName[TSDB_COL_NAME_LEN]; // as aliasName char aliasName[TSDB_COL_NAME_LEN]; // as aliasName
SColIndex colInfo; SColIndex colInfo;
int64_t uid; // refactor use the pointer uint64_t uid; // refactor use the pointer
int16_t functionId; // function id in aAgg array int16_t functionId; // function id in aAgg array
int16_t resType; // return value type int16_t resType; // return value type
int16_t resBytes; // length of return value int16_t resBytes; // length of return value
...@@ -329,6 +330,7 @@ typedef struct STscObj { ...@@ -329,6 +330,7 @@ typedef struct STscObj {
char writeAuth : 1; char writeAuth : 1;
char superAuth : 1; char superAuth : 1;
uint32_t connId; uint32_t connId;
uint64_t rid; // ref ID returned by taosAddRef
struct SSqlObj * pHb; struct SSqlObj * pHb;
struct SSqlObj * sqlList; struct SSqlObj * sqlList;
struct SSqlStream *streamList; struct SSqlStream *streamList;
...@@ -338,16 +340,16 @@ typedef struct STscObj { ...@@ -338,16 +340,16 @@ typedef struct STscObj {
} STscObj; } STscObj;
typedef struct SSubqueryState { typedef struct SSubqueryState {
int32_t numOfRemain; // the number of remain unfinished subquery int32_t numOfRemain; // the number of remain unfinished subquery
int32_t numOfSub; // the number of total sub-queries int32_t numOfSub; // the number of total sub-queries
uint64_t numOfRetrievedRows; // total number of points in this query uint64_t numOfRetrievedRows; // total number of points in this query
} SSubqueryState; } SSubqueryState;
typedef struct SSqlObj { typedef struct SSqlObj {
void *signature; void *signature;
pthread_t owner; // owner of sql object, by which it is executed pthread_t owner; // owner of sql object, by which it is executed
STscObj *pTscObj; STscObj *pTscObj;
void *pRpcCtx; int64_t rpcRid;
void (*fp)(); void (*fp)();
void (*fetchFp)(); void (*fetchFp)();
void *param; void *param;
...@@ -430,14 +432,6 @@ void tscResetSqlCmdObj(SSqlCmd *pCmd, bool removeFromCache); ...@@ -430,14 +432,6 @@ void tscResetSqlCmdObj(SSqlCmd *pCmd, bool removeFromCache);
*/ */
void tscFreeSqlResult(SSqlObj *pSql); void tscFreeSqlResult(SSqlObj *pSql);
/**
* only free part of resources allocated during query.
* TODO remove it later
* Note: this function is multi-thread safe.
* @param pObj
*/
void tscPartiallyFreeSqlObj(SSqlObj *pSql);
/** /**
* free sql object, release allocated resource * free sql object, release allocated resource
* @param pObj * @param pObj
...@@ -446,7 +440,7 @@ void tscFreeSqlObj(SSqlObj *pSql); ...@@ -446,7 +440,7 @@ void tscFreeSqlObj(SSqlObj *pSql);
void tscFreeRegisteredSqlObj(void *pSql); void tscFreeRegisteredSqlObj(void *pSql);
void tscFreeTableMetaHelper(void *pTableMeta); void tscFreeTableMetaHelper(void *pTableMeta);
void tscCloseTscObj(STscObj *pObj); void tscCloseTscObj(void *pObj);
// todo move to taos? or create a new file: taos_internal.h // todo move to taos? or create a new file: taos_internal.h
TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int), TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int),
...@@ -516,12 +510,12 @@ extern void * tscQhandle; ...@@ -516,12 +510,12 @@ extern void * tscQhandle;
extern int tscKeepConn[]; extern int tscKeepConn[];
extern int tsInsertHeadSize; extern int tsInsertHeadSize;
extern int tscNumOfThreads; extern int tscNumOfThreads;
extern int tscRefId;
extern SRpcCorEpSet tscMgmtEpSet; extern SRpcCorEpSet tscMgmtEpSet;
extern int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo); extern int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo);
int32_t tscCompareTidTags(const void* p1, const void* p2);
void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArray* tables); void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArray* tables);
#ifdef __cplusplus #ifdef __cplusplus
......
...@@ -176,7 +176,7 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo ...@@ -176,7 +176,7 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo
} }
if (pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE) { if (pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE) {
tscFetchDatablockFromSubquery(pSql); tscFetchDatablockForSubquery(pSql);
} else { } else {
tscProcessSql(pSql); tscProcessSql(pSql);
} }
...@@ -226,7 +226,7 @@ void taos_fetch_rows_a(TAOS_RES *taosa, __async_cb_func_t fp, void *param) { ...@@ -226,7 +226,7 @@ void taos_fetch_rows_a(TAOS_RES *taosa, __async_cb_func_t fp, void *param) {
// handle the sub queries of join query // handle the sub queries of join query
if (pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE) { if (pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE) {
tscFetchDatablockFromSubquery(pSql); tscFetchDatablockForSubquery(pSql);
} else if (pRes->completed) { } else if (pRes->completed) {
if(pCmd->command == TSDB_SQL_FETCH || (pCmd->command >= TSDB_SQL_SERV_STATUS && pCmd->command <= TSDB_SQL_CURRENT_USER)) { if(pCmd->command == TSDB_SQL_FETCH || (pCmd->command >= TSDB_SQL_SERV_STATUS && pCmd->command <= TSDB_SQL_CURRENT_USER)) {
if (hasMoreVnodesToTry(pSql)) { // sequentially retrieve data from remain vnodes. if (hasMoreVnodesToTry(pSql)) { // sequentially retrieve data from remain vnodes.
...@@ -405,7 +405,8 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { ...@@ -405,7 +405,8 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
SSqlRes *pRes = &pSql->res; SSqlRes *pRes = &pSql->res;
pRes->code = code; pRes->code = code;
const char* msg = (pCmd->command == TSDB_SQL_STABLEVGROUP)? "vgroup-list":"table-meta"; SSqlObj *sub = (SSqlObj*) res;
const char* msg = (sub->cmd.command == TSDB_SQL_STABLEVGROUP)? "vgroup-list":"table-meta";
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
tscError("%p get %s failed, code:%s", pSql, msg, tstrerror(code)); tscError("%p get %s failed, code:%s", pSql, msg, tstrerror(code));
goto _error; goto _error;
...@@ -427,8 +428,12 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { ...@@ -427,8 +428,12 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
} else { } else {
assert(code == TSDB_CODE_SUCCESS); assert(code == TSDB_CODE_SUCCESS);
} }
assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0) && pSql->param != NULL); // param already freed by other routine and pSql in tscCache when ctrl + c
if (atomic_load_ptr(&pSql->param) == NULL) {
return;
}
assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0));
SRetrieveSupport *trs = (SRetrieveSupport *)pSql->param; SRetrieveSupport *trs = (SRetrieveSupport *)pSql->param;
SSqlObj * pParObj = trs->pParentSql; SSqlObj * pParObj = trs->pParentSql;
...@@ -437,6 +442,20 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { ...@@ -437,6 +442,20 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
assert(pParObj->signature == pParObj && trs->subqueryIndex == pTableMetaInfo->vgroupIndex && assert(pParObj->signature == pParObj && trs->subqueryIndex == pTableMetaInfo->vgroupIndex &&
pTableMetaInfo->vgroupIndex >= 0 && pTableMetaInfo->vgroupList != NULL); pTableMetaInfo->vgroupIndex >= 0 && pTableMetaInfo->vgroupList != NULL);
// tscProcessSql can add error into async res
tscProcessSql(pSql);
return;
} else if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY)) {
tscDebug("%p update table meta in local cache, continue to process sql and send corresponding tid_tag query", pSql);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
code = tscGetTableMeta(pSql, pTableMetaInfo);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
return;
} else {
assert(code == TSDB_CODE_SUCCESS);
}
assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0));
// tscProcessSql can add error into async res // tscProcessSql can add error into async res
tscProcessSql(pSql); tscProcessSql(pSql);
return; return;
...@@ -461,7 +480,6 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { ...@@ -461,7 +480,6 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
tscResetSqlCmdObj(pCmd, false); tscResetSqlCmdObj(pCmd, false);
code = tsParseSql(pSql, true); code = tsParseSql(pSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
return; return;
} else if (code != TSDB_CODE_SUCCESS) { } else if (code != TSDB_CODE_SUCCESS) {
......
This diff is collapsed.
...@@ -49,82 +49,6 @@ typedef struct SCreateBuilder { ...@@ -49,82 +49,6 @@ typedef struct SCreateBuilder {
} SCreateBuilder; } SCreateBuilder;
static void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnName, int16_t type, size_t valueLength); static void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnName, int16_t type, size_t valueLength);
static int32_t getToStringLength(const char *pData, int32_t length, int32_t type) {
char buf[512] = {0};
int32_t len = 0;
int32_t MAX_BOOL_TYPE_LENGTH = 5; // max(strlen("true"), strlen("false"));
switch (type) {
case TSDB_DATA_TYPE_BINARY:
return length;
case TSDB_DATA_TYPE_NCHAR:
return length;
case TSDB_DATA_TYPE_DOUBLE: {
double dv = 0;
dv = GET_DOUBLE_VAL(pData);
len = sprintf(buf, "%lf", dv);
if (strncasecmp("nan", buf, 3) == 0) {
len = 4;
}
} break;
case TSDB_DATA_TYPE_FLOAT: {
float fv = 0;
fv = GET_FLOAT_VAL(pData);
len = sprintf(buf, "%f", fv);
if (strncasecmp("nan", buf, 3) == 0) {
len = 4;
}
} break;
case TSDB_DATA_TYPE_TIMESTAMP:
case TSDB_DATA_TYPE_BIGINT:
len = sprintf(buf, "%" PRId64, *(int64_t *)pData);
break;
case TSDB_DATA_TYPE_BOOL:
len = MAX_BOOL_TYPE_LENGTH;
break;
default:
len = sprintf(buf, "%d", *(int32_t *)pData);
break;
};
return len;
}
/*
* we need to convert all data into string, so we need to sprintf all kinds of
* non-string data into string, and record its length to get the right
* maximum length. The length may be less or greater than its original binary length:
* For example:
* length((short) 1) == 1, less than sizeof(short)
* length((uint64_t) 123456789011) > 12, greater than sizsof(uint64_t)
*/
static int32_t tscMaxLengthOfTagsFields(SSqlObj *pSql) {
STableMeta *pMeta = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0)->pTableMeta;
if (pMeta->tableType == TSDB_SUPER_TABLE || pMeta->tableType == TSDB_NORMAL_TABLE ||
pMeta->tableType == TSDB_STREAM_TABLE) {
return 0;
}
char * pTagValue = tsGetTagsValue(pMeta);
SSchema *pTagsSchema = tscGetTableTagSchema(pMeta);
int32_t len = getToStringLength(pTagValue, pTagsSchema[0].bytes, pTagsSchema[0].type);
pTagValue += pTagsSchema[0].bytes;
int32_t numOfTags = tscGetNumOfTags(pMeta);
for (int32_t i = 1; i < numOfTags; ++i) {
int32_t tLen = getToStringLength(pTagValue, pTagsSchema[i].bytes, pTagsSchema[i].type);
if (len < tLen) {
len = tLen;
}
pTagValue += pTagsSchema[i].bytes;
}
return len;
}
static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) { static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
SSqlRes *pRes = &pSql->res; SSqlRes *pRes = &pSql->res;
...@@ -186,8 +110,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) { ...@@ -186,8 +110,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
return 0; return 0;
} }
// the following is handle display tags value for meters created according to metric // the following is handle display tags for table created according to super table
char *pTagValue = tsGetTagsValue(pMeta);
for (int32_t i = numOfRows; i < totalNumOfRows; ++i) { for (int32_t i = numOfRows; i < totalNumOfRows; ++i) {
// field name // field name
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 0); TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 0);
...@@ -219,8 +142,6 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) { ...@@ -219,8 +142,6 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
char *target = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 3) * totalNumOfRows + pField->bytes * i; char *target = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 3) * totalNumOfRows + pField->bytes * i;
const char *src = "TAG"; const char *src = "TAG";
STR_WITH_MAXSIZE_TO_VARSTR(target, src, pField->bytes); STR_WITH_MAXSIZE_TO_VARSTR(target, src, pField->bytes);
pTagValue += pSchema[i].bytes;
} }
return 0; return 0;
...@@ -286,10 +207,10 @@ static int32_t tscProcessDescribeTable(SSqlObj *pSql) { ...@@ -286,10 +207,10 @@ static int32_t tscProcessDescribeTable(SSqlObj *pSql) {
const int32_t TYPE_COLUMN_LENGTH = 16; const int32_t TYPE_COLUMN_LENGTH = 16;
const int32_t NOTE_COLUMN_MIN_LENGTH = 8; const int32_t NOTE_COLUMN_MIN_LENGTH = 8;
int32_t noteFieldLen = tscMaxLengthOfTagsFields(pSql); int32_t noteFieldLen = NOTE_COLUMN_MIN_LENGTH;//tscMaxLengthOfTagsFields(pSql);
if (noteFieldLen == 0) { // if (noteFieldLen == 0) {
noteFieldLen = NOTE_COLUMN_MIN_LENGTH; // noteFieldLen = NOTE_COLUMN_MIN_LENGTH;
} // }
int32_t rowLen = tscBuildTableSchemaResultFields(pSql, NUM_OF_DESC_TABLE_COLUMNS, TYPE_COLUMN_LENGTH, noteFieldLen); int32_t rowLen = tscBuildTableSchemaResultFields(pSql, NUM_OF_DESC_TABLE_COLUMNS, TYPE_COLUMN_LENGTH, noteFieldLen);
tscFieldInfoUpdateOffset(pQueryInfo); tscFieldInfoUpdateOffset(pQueryInfo);
......
...@@ -97,14 +97,14 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalReducer *pReducer, tOrderDesc ...@@ -97,14 +97,14 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalReducer *pReducer, tOrderDesc
pCtx->param[2].i64Key = pQueryInfo->order.order; pCtx->param[2].i64Key = pQueryInfo->order.order;
pCtx->param[2].nType = TSDB_DATA_TYPE_BIGINT; pCtx->param[2].nType = TSDB_DATA_TYPE_BIGINT;
pCtx->param[1].i64Key = pQueryInfo->order.orderColId; pCtx->param[1].i64Key = pQueryInfo->order.orderColId;
} else if (functionId == TSDB_FUNC_APERCT) {
pCtx->param[0].i64Key = pExpr->param[0].i64Key;
pCtx->param[0].nType = pExpr->param[0].nType;
} }
SResultInfo *pResInfo = &pReducer->pResInfo[i]; pCtx->interBufBytes = pExpr->interBytes;
pResInfo->bufLen = pExpr->interBytes; pCtx->resultInfo = calloc(1, pCtx->interBufBytes + sizeof(SResultRowCellInfo));
pResInfo->interResultBuf = calloc(1, (size_t) pResInfo->bufLen); pCtx->stableQuery = true;
pCtx->resultInfo = &pReducer->pResInfo[i];
pCtx->resultInfo->superTableQ = true;
} }
int16_t n = 0; int16_t n = 0;
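Note: the replacement lines in the hunk above fold the old per-expression SResultInfo array into each SQLFunctionCtx — a single calloc now covers an SResultRowCellInfo header plus interBufBytes of intermediate-result space. Below is a minimal, self-contained sketch of that header-plus-trailing-buffer allocation pattern; the struct and field names are simplified stand-ins, not the real TDengine definitions.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* hypothetical stand-in for SResultRowCellInfo */
typedef struct SCellInfoSketch {
  int8_t  complete;   /* whether the result is final                   */
  int32_t numOfRes;   /* number of rows produced so far                */
  /* interBufBytes of working space follow this header in memory       */
} SCellInfoSketch;

/* pointer to the working space that trails the header */
static void *cellInterBuf(SCellInfoSketch *pInfo) {
  return (char *)pInfo + sizeof(SCellInfoSketch);
}

int main(void) {
  size_t interBufBytes = 64;  /* would come from pExpr->interBytes in the real code */

  /* one allocation covers both the header and the intermediate buffer */
  SCellInfoSketch *pInfo = calloc(1, sizeof(SCellInfoSketch) + interBufBytes);
  if (pInfo == NULL) return 1;

  memset(cellInterBuf(pInfo), 0xab, interBufBytes);  /* use the trailing buffer */
  pInfo->numOfRes = 1;

  printf("header=%zu bytes, interBuf=%zu bytes\n", sizeof(SCellInfoSketch), interBufBytes);
  free(pInfo);  /* a single free releases both parts */
  return 0;
}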
...@@ -227,7 +227,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd ...@@ -227,7 +227,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
if (ds == NULL) { if (ds == NULL) {
tscError("%p failed to create merge structure", pSql); tscError("%p failed to create merge structure", pSql);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
taosTFree(pReducer); tfree(pReducer);
return; return;
} }
...@@ -254,7 +254,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd ...@@ -254,7 +254,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
if (ds->filePage.num == 0) { // no data in this flush, the index does not increase if (ds->filePage.num == 0) { // no data in this flush, the index does not increase
tscDebug("%p flush data is empty, ignore %d flush record", pSql, idx); tscDebug("%p flush data is empty, ignore %d flush record", pSql, idx);
taosTFree(ds); tfree(ds);
continue; continue;
} }
...@@ -264,7 +264,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd ...@@ -264,7 +264,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
// no data actually, no need to merge result. // no data actually, no need to merge result.
if (idx == 0) { if (idx == 0) {
taosTFree(pReducer); tfree(pReducer);
return; return;
} }
...@@ -272,7 +272,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd ...@@ -272,7 +272,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
SCompareParam *param = malloc(sizeof(SCompareParam)); SCompareParam *param = malloc(sizeof(SCompareParam));
if (param == NULL) { if (param == NULL) {
taosTFree(pReducer); tfree(pReducer);
return; return;
} }
...@@ -286,8 +286,8 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd ...@@ -286,8 +286,8 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
pRes->code = tLoserTreeCreate(&pReducer->pLoserTree, pReducer->numOfBuffer, param, treeComparator); pRes->code = tLoserTreeCreate(&pReducer->pLoserTree, pReducer->numOfBuffer, param, treeComparator);
if (pReducer->pLoserTree == NULL || pRes->code != 0) { if (pReducer->pLoserTree == NULL || pRes->code != 0) {
taosTFree(param); tfree(param);
taosTFree(pReducer); tfree(pReducer);
return; return;
} }
...@@ -330,14 +330,14 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd ...@@ -330,14 +330,14 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
if (pReducer->pTempBuffer == NULL || pReducer->discardData == NULL || pReducer->pResultBuf == NULL || if (pReducer->pTempBuffer == NULL || pReducer->discardData == NULL || pReducer->pResultBuf == NULL ||
/*pReducer->pBufForInterpo == NULL || */pReducer->pFinalRes == NULL || pReducer->prevRowOfInput == NULL) { /*pReducer->pBufForInterpo == NULL || */pReducer->pFinalRes == NULL || pReducer->prevRowOfInput == NULL) {
taosTFree(pReducer->pTempBuffer); tfree(pReducer->pTempBuffer);
taosTFree(pReducer->discardData); tfree(pReducer->discardData);
taosTFree(pReducer->pResultBuf); tfree(pReducer->pResultBuf);
taosTFree(pReducer->pFinalRes); tfree(pReducer->pFinalRes);
taosTFree(pReducer->prevRowOfInput); tfree(pReducer->prevRowOfInput);
taosTFree(pReducer->pLoserTree); tfree(pReducer->pLoserTree);
taosTFree(param); tfree(param);
taosTFree(pReducer); tfree(pReducer);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
return; return;
} }
...@@ -345,7 +345,6 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd ...@@ -345,7 +345,6 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
size_t numOfCols = tscSqlExprNumOfExprs(pQueryInfo); size_t numOfCols = tscSqlExprNumOfExprs(pQueryInfo);
pReducer->pTempBuffer->num = 0; pReducer->pTempBuffer->num = 0;
pReducer->pResInfo = calloc(numOfCols, sizeof(SResultInfo));
tscCreateResPointerInfo(pRes, pQueryInfo); tscCreateResPointerInfo(pRes, pQueryInfo);
tscInitSqlContext(pCmd, pReducer, pDesc); tscInitSqlContext(pCmd, pReducer, pDesc);
...@@ -489,47 +488,40 @@ void tscDestroyLocalReducer(SSqlObj *pSql) { ...@@ -489,47 +488,40 @@ void tscDestroyLocalReducer(SSqlObj *pSql) {
tscDebug("%p waiting for delete procedure, status: %d", pSql, status); tscDebug("%p waiting for delete procedure, status: %d", pSql, status);
} }
pLocalReducer->pFillInfo = taosDestoryFillInfo(pLocalReducer->pFillInfo); pLocalReducer->pFillInfo = taosDestroyFillInfo(pLocalReducer->pFillInfo);
if (pLocalReducer->pCtx != NULL) { if (pLocalReducer->pCtx != NULL) {
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[i]; SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[i];
tVariantDestroy(&pCtx->tag); tVariantDestroy(&pCtx->tag);
tfree(pCtx->resultInfo);
if (pCtx->tagInfo.pTagCtxList != NULL) { if (pCtx->tagInfo.pTagCtxList != NULL) {
taosTFree(pCtx->tagInfo.pTagCtxList); tfree(pCtx->tagInfo.pTagCtxList);
} }
} }
taosTFree(pLocalReducer->pCtx); tfree(pLocalReducer->pCtx);
} }
taosTFree(pLocalReducer->prevRowOfInput); tfree(pLocalReducer->prevRowOfInput);
taosTFree(pLocalReducer->pTempBuffer);
taosTFree(pLocalReducer->pResultBuf);
if (pLocalReducer->pResInfo != NULL) {
size_t num = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < num; ++i) {
taosTFree(pLocalReducer->pResInfo[i].interResultBuf);
}
taosTFree(pLocalReducer->pResInfo); tfree(pLocalReducer->pTempBuffer);
} tfree(pLocalReducer->pResultBuf);
if (pLocalReducer->pLoserTree) { if (pLocalReducer->pLoserTree) {
taosTFree(pLocalReducer->pLoserTree->param); tfree(pLocalReducer->pLoserTree->param);
taosTFree(pLocalReducer->pLoserTree); tfree(pLocalReducer->pLoserTree);
} }
taosTFree(pLocalReducer->pFinalRes); tfree(pLocalReducer->pFinalRes);
taosTFree(pLocalReducer->discardData); tfree(pLocalReducer->discardData);
tscLocalReducerEnvDestroy(pLocalReducer->pExtMemBuffer, pLocalReducer->pDesc, pLocalReducer->resColModel, tscLocalReducerEnvDestroy(pLocalReducer->pExtMemBuffer, pLocalReducer->pDesc, pLocalReducer->resColModel,
pLocalReducer->numOfVnode); pLocalReducer->numOfVnode);
for (int32_t i = 0; i < pLocalReducer->numOfBuffer; ++i) { for (int32_t i = 0; i < pLocalReducer->numOfBuffer; ++i) {
taosTFree(pLocalReducer->pLocalDataSrc[i]); tfree(pLocalReducer->pLocalDataSrc[i]);
} }
pLocalReducer->numOfBuffer = 0; pLocalReducer->numOfBuffer = 0;
...@@ -596,7 +588,7 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm ...@@ -596,7 +588,7 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm
} }
*pOrderDesc = tOrderDesCreate(orderColIndexList, numOfGroupByCols, pModel, pQueryInfo->order.order); *pOrderDesc = tOrderDesCreate(orderColIndexList, numOfGroupByCols, pModel, pQueryInfo->order.order);
taosTFree(orderColIndexList); tfree(orderColIndexList);
if (*pOrderDesc == NULL) { if (*pOrderDesc == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY; return TSDB_CODE_TSC_OUT_OF_MEMORY;
...@@ -698,7 +690,8 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr ...@@ -698,7 +690,8 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
pg *= 2; pg *= 2;
} }
size_t numOfSubs = pTableMetaInfo->vgroupList->numOfVgroups; size_t numOfSubs = pSql->subState.numOfSub;
assert(numOfSubs <= pTableMetaInfo->vgroupList->numOfVgroups);
for (int32_t i = 0; i < numOfSubs; ++i) { for (int32_t i = 0; i < numOfSubs; ++i) {
(*pMemBuffer)[i] = createExtMemBuffer(nBufferSizes, rlen, pg, pModel); (*pMemBuffer)[i] = createExtMemBuffer(nBufferSizes, rlen, pg, pModel);
(*pMemBuffer)[i]->flushModel = MULTIPLE_APPEND_MODEL; (*pMemBuffer)[i]->flushModel = MULTIPLE_APPEND_MODEL;
...@@ -706,7 +699,7 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr ...@@ -706,7 +699,7 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
if (createOrderDescriptor(pOrderDesc, pCmd, pModel) != TSDB_CODE_SUCCESS) { if (createOrderDescriptor(pOrderDesc, pCmd, pModel) != TSDB_CODE_SUCCESS) {
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
taosTFree(pSchema); tfree(pSchema);
return pRes->code; return pRes->code;
} }
...@@ -743,7 +736,7 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr ...@@ -743,7 +736,7 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
} }
*pFinalModel = createColumnModel(pSchema, (int32_t)size, capacity); *pFinalModel = createColumnModel(pSchema, (int32_t)size, capacity);
taosTFree(pSchema); tfree(pSchema);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
...@@ -763,7 +756,7 @@ void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDe ...@@ -763,7 +756,7 @@ void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDe
pMemBuffer[i] = destoryExtMemBuffer(pMemBuffer[i]); pMemBuffer[i] = destoryExtMemBuffer(pMemBuffer[i]);
} }
taosTFree(pMemBuffer); tfree(pMemBuffer);
} }
/** /**
...@@ -985,10 +978,10 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO ...@@ -985,10 +978,10 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
pBeforeFillData->num = 0; pBeforeFillData->num = 0;
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
taosTFree(pResPages[i]); tfree(pResPages[i]);
} }
taosTFree(pResPages); tfree(pResPages);
} }
static void savePreviousRow(SLocalReducer *pLocalReducer, tFilePage *tmpBuffer) { static void savePreviousRow(SLocalReducer *pLocalReducer, tFilePage *tmpBuffer) {
...@@ -1071,7 +1064,7 @@ static int64_t getNumOfResultLocal(SQueryInfo *pQueryInfo, SQLFunctionCtx *pCtx) ...@@ -1071,7 +1064,7 @@ static int64_t getNumOfResultLocal(SQueryInfo *pQueryInfo, SQLFunctionCtx *pCtx)
continue; continue;
} }
SResultInfo* pResInfo = GET_RES_INFO(&pCtx[j]); SResultRowCellInfo* pResInfo = GET_RES_INFO(&pCtx[j]);
if (maxOutput < pResInfo->numOfRes) { if (maxOutput < pResInfo->numOfRes) {
maxOutput = pResInfo->numOfRes; maxOutput = pResInfo->numOfRes;
} }
...@@ -1252,10 +1245,11 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur ...@@ -1252,10 +1245,11 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur
return true; return true;
} }
void resetOutputBuf(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) { // reset output buffer to the beginning void resetOutputBuf(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) {// reset output buffer to the beginning
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { size_t t = tscSqlExprNumOfExprs(pQueryInfo);
pLocalReducer->pCtx[i].aOutputBuf = for (int32_t i = 0; i < t; ++i) {
pLocalReducer->pResultBuf->data + tscFieldInfoGetOffset(pQueryInfo, i) * pLocalReducer->resColModel->capacity; SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
pLocalReducer->pCtx[i].aOutputBuf = pLocalReducer->pResultBuf->data + pExpr->offset * pLocalReducer->resColModel->capacity;
} }
memset(pLocalReducer->pResultBuf, 0, pLocalReducer->nResultBufSize + sizeof(tFilePage)); memset(pLocalReducer->pResultBuf, 0, pLocalReducer->nResultBufSize + sizeof(tFilePage));
...@@ -1500,8 +1494,7 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) { ...@@ -1500,8 +1494,7 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
if (pLocalReducer->discard && sameGroup) { if (pLocalReducer->discard && sameGroup) {
pLocalReducer->hasUnprocessedRow = false; pLocalReducer->hasUnprocessedRow = false;
tmpBuffer->num = 0; tmpBuffer->num = 0;
} else { } else { // current row does not belongs to the previous group, so it is not be handled yet.
// current row does not belongs to the previous group, so it is not be handled yet.
pLocalReducer->hasUnprocessedRow = true; pLocalReducer->hasUnprocessedRow = true;
} }
......
...@@ -702,7 +702,7 @@ static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableList, char **st ...@@ -702,7 +702,7 @@ static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableList, char **st
} }
int32_t code = TSDB_CODE_TSC_INVALID_SQL; int32_t code = TSDB_CODE_TSC_INVALID_SQL;
char * tmpTokenBuf = calloc(1, 4096); // used for deleting Escape character: \\, \', \" char * tmpTokenBuf = calloc(1, 16*1024); // used for deleting Escape character: \\, \', \"
if (NULL == tmpTokenBuf) { if (NULL == tmpTokenBuf) {
return TSDB_CODE_TSC_OUT_OF_MEMORY; return TSDB_CODE_TSC_OUT_OF_MEMORY;
} }
...@@ -1309,7 +1309,7 @@ int tsParseSql(SSqlObj *pSql, bool initial) { ...@@ -1309,7 +1309,7 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
if ((!pCmd->parseFinished) && (!initial)) { if ((!pCmd->parseFinished) && (!initial)) {
tscDebug("%p resume to parse sql: %s", pSql, pCmd->curSql); tscDebug("%p resume to parse sql: %s", pSql, pCmd->curSql);
} }
ret = tscAllocPayload(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE); ret = tscAllocPayload(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE);
if (TSDB_CODE_SUCCESS != ret) { if (TSDB_CODE_SUCCESS != ret) {
return ret; return ret;
...@@ -1406,7 +1406,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) { ...@@ -1406,7 +1406,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
assert(taos_errno(pSql) == code); assert(taos_errno(pSql) == code);
taos_free_result(pSql); taos_free_result(pSql);
taosTFree(pSupporter); tfree(pSupporter);
fclose(fp); fclose(fp);
pParentSql->res.code = code; pParentSql->res.code = code;
...@@ -1445,7 +1445,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) { ...@@ -1445,7 +1445,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
char *tokenBuf = calloc(1, 4096); char *tokenBuf = calloc(1, 4096);
while ((readLen = taosGetline(&line, &n, fp)) != -1) { while ((readLen = tgetline(&line, &n, fp)) != -1) {
if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) {
line[--readLen] = 0; line[--readLen] = 0;
} }
...@@ -1470,7 +1470,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) { ...@@ -1470,7 +1470,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
} }
} }
taosTFree(tokenBuf); tfree(tokenBuf);
free(line); free(line);
if (count > 0) { if (count > 0) {
...@@ -1483,7 +1483,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) { ...@@ -1483,7 +1483,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
} else { } else {
taos_free_result(pSql); taos_free_result(pSql);
taosTFree(pSupporter); tfree(pSupporter);
fclose(fp); fclose(fp);
pParentSql->fp = pParentSql->fetchFp; pParentSql->fp = pParentSql->fetchFp;
...@@ -1513,7 +1513,7 @@ void tscProcessMultiVnodesImportFromFile(SSqlObj *pSql) { ...@@ -1513,7 +1513,7 @@ void tscProcessMultiVnodesImportFromFile(SSqlObj *pSql) {
pSql->res.code = TAOS_SYSTEM_ERROR(errno); pSql->res.code = TAOS_SYSTEM_ERROR(errno);
tscError("%p failed to open file %s to load data from file, code:%s", pSql, pCmd->payload, tstrerror(pSql->res.code)); tscError("%p failed to open file %s to load data from file, code:%s", pSql, pCmd->payload, tstrerror(pSql->res.code));
taosTFree(pSupporter) tfree(pSupporter)
tscQueueAsyncRes(pSql); tscQueueAsyncRes(pSql);
return; return;
......
...@@ -222,7 +222,7 @@ void tscKillStream(STscObj *pObj, uint32_t killId) { ...@@ -222,7 +222,7 @@ void tscKillStream(STscObj *pObj, uint32_t killId) {
} }
int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) { int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
SCMHeartBeatMsg *pHeartbeat = pMsg; SHeartBeatMsg *pHeartbeat = pMsg;
int allocedQueriesNum = pHeartbeat->numOfQueries; int allocedQueriesNum = pHeartbeat->numOfQueries;
int allocedStreamsNum = pHeartbeat->numOfStreams; int allocedStreamsNum = pHeartbeat->numOfStreams;
...@@ -277,7 +277,7 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) { ...@@ -277,7 +277,7 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
} }
int32_t msgLen = pHeartbeat->numOfQueries * sizeof(SQueryDesc) + pHeartbeat->numOfStreams * sizeof(SStreamDesc) + int32_t msgLen = pHeartbeat->numOfQueries * sizeof(SQueryDesc) + pHeartbeat->numOfStreams * sizeof(SStreamDesc) +
sizeof(SCMHeartBeatMsg); sizeof(SHeartBeatMsg);
pHeartbeat->connId = htonl(pObj->connId); pHeartbeat->connId = htonl(pObj->connId);
pHeartbeat->numOfQueries = htonl(pHeartbeat->numOfQueries); pHeartbeat->numOfQueries = htonl(pHeartbeat->numOfQueries);
pHeartbeat->numOfStreams = htonl(pHeartbeat->numOfStreams); pHeartbeat->numOfStreams = htonl(pHeartbeat->numOfStreams);
......

...@@ -118,7 +118,7 @@ SSchema* tscGetTableColumnSchema(const STableMeta* pTableMeta, int32_t colIndex) ...@@ -118,7 +118,7 @@ SSchema* tscGetTableColumnSchema(const STableMeta* pTableMeta, int32_t colIndex)
} }
// TODO for large number of columns, employ the binary search method // TODO for large number of columns, employ the binary search method
SSchema* tscGetTableColumnSchemaById(STableMeta* pTableMeta, int16_t colId) { SSchema* tscGetColumnSchemaById(STableMeta* pTableMeta, int16_t colId) {
STableComInfo tinfo = tscGetTableInfo(pTableMeta); STableComInfo tinfo = tscGetTableInfo(pTableMeta);
for(int32_t i = 0; i < tinfo.numOfColumns + tinfo.numOfTags; ++i) { for(int32_t i = 0; i < tinfo.numOfColumns + tinfo.numOfTags; ++i) {
...@@ -140,7 +140,7 @@ struct SSchema tscGetTbnameColumnSchema() { ...@@ -140,7 +140,7 @@ struct SSchema tscGetTbnameColumnSchema() {
strcpy(s.name, TSQL_TBNAME_L); strcpy(s.name, TSQL_TBNAME_L);
return s; return s;
} }
static void tscInitCorVgroupInfo(SCMCorVgroupInfo *corVgroupInfo, SCMVgroupInfo *vgroupInfo) { static void tscInitCorVgroupInfo(SCorVgroupInfo *corVgroupInfo, SVgroupInfo *vgroupInfo) {
corVgroupInfo->version = 0; corVgroupInfo->version = 0;
corVgroupInfo->inUse = 0; corVgroupInfo->inUse = 0;
corVgroupInfo->numOfEps = vgroupInfo->numOfEps; corVgroupInfo->numOfEps = vgroupInfo->numOfEps;
...@@ -166,7 +166,7 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size ...@@ -166,7 +166,7 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size
pTableMeta->id.tid = pTableMetaMsg->tid; pTableMeta->id.tid = pTableMetaMsg->tid;
pTableMeta->id.uid = pTableMetaMsg->uid; pTableMeta->id.uid = pTableMetaMsg->uid;
SCMVgroupInfo* pVgroupInfo = &pTableMeta->vgroupInfo; SVgroupInfo* pVgroupInfo = &pTableMeta->vgroupInfo;
pVgroupInfo->numOfEps = pTableMetaMsg->vgroup.numOfEps; pVgroupInfo->numOfEps = pTableMetaMsg->vgroup.numOfEps;
pVgroupInfo->vgId = pTableMetaMsg->vgroup.vgId; pVgroupInfo->vgId = pTableMetaMsg->vgroup.vgId;
...@@ -177,7 +177,7 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size ...@@ -177,7 +177,7 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size
pVgroupInfo->epAddr[i].port = pEpMsg->port; pVgroupInfo->epAddr[i].port = pEpMsg->port;
} }
tscInitCorVgroupInfo(&pTableMeta->corVgroupInfo, &pTableMeta->vgroupInfo); tscInitCorVgroupInfo(&pTableMeta->corVgroupInfo, pVgroupInfo);
pTableMeta->sversion = pTableMetaMsg->sversion; pTableMeta->sversion = pTableMetaMsg->sversion;
pTableMeta->tversion = pTableMetaMsg->tversion; pTableMeta->tversion = pTableMetaMsg->tversion;
...@@ -197,28 +197,6 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size ...@@ -197,28 +197,6 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size
return pTableMeta; return pTableMeta;
} }
/**
* the TableMeta data format in memory is as follows:
*
* +--------------------+
* |STableMeta Body data| sizeof(STableMeta)
* +--------------------+
* |Schema data | numOfTotalColumns * sizeof(SSchema)
* +--------------------+
* |Tags data | tag_col_1.bytes + tag_col_2.bytes + ....
* +--------------------+
*
* @param pTableMeta
* @return
*/
char* tsGetTagsValue(STableMeta* pTableMeta) {
int32_t offset = 0;
// int32_t numOfTotalCols = pTableMeta->numOfColumns + pTableMeta->numOfTags;
// uint32_t offset = sizeof(STableMeta) + numOfTotalCols * sizeof(SSchema);
return ((char*)pTableMeta + offset);
}
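Note: the removed block above documents the in-memory layout of STableMeta — the fixed struct body, then the column/tag schema entries, then the packed tag values. The following small sketch illustrates the offset arithmetic implied by the commented-out lines; the types and names are placeholders, not the actual TDengine headers.

#include <stdint.h>
#include <stdio.h>

/* simplified placeholders for SSchema and STableMeta */
typedef struct SSchemaSketch { int8_t type; int16_t colId; int16_t bytes; char name[64]; } SSchemaSketch;
typedef struct STableMetaSketch {
  int32_t numOfColumns;
  int32_t numOfTags;
  /* schema[] entries and the packed tag values are laid out right after this struct */
} STableMetaSketch;

/* start of the packed tag values, per the layout described in the removed comment */
static char *tagsValueOf(STableMetaSketch *pMeta) {
  int32_t numOfTotalCols = pMeta->numOfColumns + pMeta->numOfTags;
  size_t  offset = sizeof(STableMetaSketch) + numOfTotalCols * sizeof(SSchemaSketch);
  return (char *)pMeta + offset;
}

int main(void) {
  STableMetaSketch meta = { .numOfColumns = 4, .numOfTags = 2 };
  printf("tag values would start %zu bytes into the block\n",
         (size_t)(tagsValueOf(&meta) - (char *)&meta));
  return 0;
}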
// todo refactor // todo refactor
UNUSED_FUNC static FORCE_INLINE char* skipSegments(char* input, char delim, int32_t num) { UNUSED_FUNC static FORCE_INLINE char* skipSegments(char* input, char delim, int32_t num) {
for (int32_t i = 0; i < num; ++i) { for (int32_t i = 0; i < num; ++i) {
......
...@@ -161,6 +161,7 @@ static SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pa ...@@ -161,6 +161,7 @@ static SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pa
registerSqlObj(pSql); registerSqlObj(pSql);
tsInsertHeadSize = sizeof(SMsgDesc) + sizeof(SSubmitMsg); tsInsertHeadSize = sizeof(SMsgDesc) + sizeof(SSubmitMsg);
pObj->rid = taosAddRef(tscRefId, pObj);
return pSql; return pSql;
} }
...@@ -278,9 +279,9 @@ void taos_close(TAOS *taos) { ...@@ -278,9 +279,9 @@ void taos_close(TAOS *taos) {
SSqlObj* pHb = pObj->pHb; SSqlObj* pHb = pObj->pHb;
if (pHb != NULL && atomic_val_compare_exchange_ptr(&pObj->pHb, pHb, 0) == pHb) { if (pHb != NULL && atomic_val_compare_exchange_ptr(&pObj->pHb, pHb, 0) == pHb) {
if (pHb->pRpcCtx != NULL) { // wait for rsp from dnode if (pHb->rpcRid > 0) { // wait for rsp from dnode
rpcCancelRequest(pHb->pRpcCtx); rpcCancelRequest(pHb->rpcRid);
pHb->pRpcCtx = NULL; pHb->rpcRid = -1;
} }
tscDebug("%p HB is freed", pHb); tscDebug("%p HB is freed", pHb);
...@@ -296,7 +297,8 @@ void taos_close(TAOS *taos) { ...@@ -296,7 +297,8 @@ void taos_close(TAOS *taos) {
} }
tscDebug("%p all sqlObj are freed, free tscObj and close dnodeConn:%p", pObj, pObj->pDnodeConn); tscDebug("%p all sqlObj are freed, free tscObj and close dnodeConn:%p", pObj, pObj->pDnodeConn);
tscCloseTscObj(pObj);
taosRemoveRef(tscRefId, pObj->rid);
} }
void waitForQueryRsp(void *param, TAOS_RES *tres, int code) { void waitForQueryRsp(void *param, TAOS_RES *tres, int code) {
...@@ -564,7 +566,7 @@ int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) { ...@@ -564,7 +566,7 @@ int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) {
pRes->rspType = 0; pRes->rspType = 0;
pSql->subState.numOfSub = 0; pSql->subState.numOfSub = 0;
taosTFree(pSql->pSubs); tfree(pSql->pSubs);
assert(pSql->fp == NULL); assert(pSql->fp == NULL);
...@@ -746,9 +748,9 @@ static void tscKillSTableQuery(SSqlObj *pSql) { ...@@ -746,9 +748,9 @@ static void tscKillSTableQuery(SSqlObj *pSql) {
assert(pSubObj->self == (SSqlObj**) p); assert(pSubObj->self == (SSqlObj**) p);
pSubObj->res.code = TSDB_CODE_TSC_QUERY_CANCELLED; pSubObj->res.code = TSDB_CODE_TSC_QUERY_CANCELLED;
if (pSubObj->pRpcCtx != NULL) { if (pSubObj->rpcRid > 0) {
rpcCancelRequest(pSubObj->pRpcCtx); rpcCancelRequest(pSubObj->rpcRid);
pSubObj->pRpcCtx = NULL; pSubObj->rpcRid = -1;
} }
tscQueueAsyncRes(pSubObj); tscQueueAsyncRes(pSubObj);
...@@ -773,7 +775,7 @@ void taos_stop_query(TAOS_RES *res) { ...@@ -773,7 +775,7 @@ void taos_stop_query(TAOS_RES *res) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
if (tscIsTwoStageSTableQuery(pQueryInfo, 0)) { if (tscIsTwoStageSTableQuery(pQueryInfo, 0)) {
assert(pSql->pRpcCtx == NULL); assert(pSql->rpcRid <= 0);
tscKillSTableQuery(pSql); tscKillSTableQuery(pSql);
} else { } else {
if (pSql->cmd.command < TSDB_SQL_LOCAL) { if (pSql->cmd.command < TSDB_SQL_LOCAL) {
...@@ -782,9 +784,9 @@ void taos_stop_query(TAOS_RES *res) { ...@@ -782,9 +784,9 @@ void taos_stop_query(TAOS_RES *res) {
* reset and freed in the processMsgFromServer function, and causes the invalid * reset and freed in the processMsgFromServer function, and causes the invalid
* write problem for rpcCancelRequest. * write problem for rpcCancelRequest.
*/ */
if (pSql->pRpcCtx != NULL) { if (pSql->rpcRid > 0) {
rpcCancelRequest(pSql->pRpcCtx); rpcCancelRequest(pSql->rpcRid);
pSql->pRpcCtx = NULL; pSql->rpcRid = -1;
} }
tscQueueAsyncRes(pSql); tscQueueAsyncRes(pSql);
...@@ -892,7 +894,7 @@ int taos_validate_sql(TAOS *taos, const char *sql) { ...@@ -892,7 +894,7 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
if (sqlLen > tsMaxSQLStringLen) { if (sqlLen > tsMaxSQLStringLen) {
tscError("%p sql too long", pSql); tscError("%p sql too long", pSql);
pRes->code = TSDB_CODE_TSC_INVALID_SQL; pRes->code = TSDB_CODE_TSC_INVALID_SQL;
taosTFree(pSql); tfree(pSql);
return pRes->code; return pRes->code;
} }
...@@ -901,7 +903,7 @@ int taos_validate_sql(TAOS *taos, const char *sql) { ...@@ -901,7 +903,7 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
tscError("%p failed to malloc sql string buffer", pSql); tscError("%p failed to malloc sql string buffer", pSql);
tscDebug("%p Valid SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(pSql), pObj); tscDebug("%p Valid SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(pSql), pObj);
taosTFree(pSql); tfree(pSql);
return pRes->code; return pRes->code;
} }
......
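Note: several hunks in this file swap the stored RPC context pointer (pRpcCtx) for a numeric reference id (rpcRid), so cancelling a request never dereferences memory that processMsgFromServer may already have freed. The toy sketch below shows why cancelling through an id looked up in a registry tolerates that race while a raw pointer does not; the registry is illustrative only and merely mirrors the idea behind the ref-id scheme, not the real taosAddRef/rpcCancelRequest implementation.

#include <inttypes.h>
#include <stdio.h>

/* toy registry: an id stays safe to look up even after the object is gone */
#define MAX_REQ 16
static void *reqRegistry[MAX_REQ];

static int64_t registryAdd(void *p) {
  for (int64_t i = 0; i < MAX_REQ; ++i) {
    if (reqRegistry[i] == NULL) { reqRegistry[i] = p; return i + 1; }
  }
  return -1;
}

static void registryRemove(int64_t rid) {
  if (rid > 0 && rid <= MAX_REQ) reqRegistry[rid - 1] = NULL;
}

/* cancel by id: if the request was already freed, the lookup simply fails */
static void cancelByRid(int64_t rid) {
  void *ctx = (rid > 0 && rid <= MAX_REQ) ? reqRegistry[rid - 1] : NULL;
  if (ctx == NULL) {
    printf("rid %" PRId64 ": request already finished, cancel is a no-op\n", rid);
  } else {
    printf("rid %" PRId64 ": cancelling in-flight request %p\n", rid, ctx);
  }
}

int main(void) {
  int     dummyCtx = 0;
  int64_t rid = registryAdd(&dummyCtx);   /* like pSql->rpcRid = ...             */

  cancelByRid(rid);        /* safe: the request is still registered               */
  registryRemove(rid);     /* the response path frees and unregisters the request */
  cancelByRid(rid);        /* still safe: the stale id resolves to nothing        */
  return 0;
}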
...@@ -273,7 +273,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf ...@@ -273,7 +273,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
taosCacheRelease(tscMetaCache, (void**)&(pTableMetaInfo->pTableMeta), false); taosCacheRelease(tscMetaCache, (void**)&(pTableMetaInfo->pTableMeta), false);
tscFreeSqlResult(pSql); tscFreeSqlResult(pSql);
taosTFree(pSql->pSubs); tfree(pSql->pSubs);
pSql->subState.numOfSub = 0; pSql->subState.numOfSub = 0;
pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList); pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList);
tscSetNextLaunchTimer(pStream, pSql); tscSetNextLaunchTimer(pStream, pSql);
...@@ -617,6 +617,6 @@ void taos_close_stream(TAOS_STREAM *handle) { ...@@ -617,6 +617,6 @@ void taos_close_stream(TAOS_STREAM *handle) {
pStream->pSql = NULL; pStream->pSql = NULL;
taos_free_result(pSql); taos_free_result(pSql);
taosTFree(pStream); tfree(pStream);
} }
} }
...@@ -36,6 +36,7 @@ void * tscTmr; ...@@ -36,6 +36,7 @@ void * tscTmr;
void * tscQhandle; void * tscQhandle;
void * tscCheckDiskUsageTmr; void * tscCheckDiskUsageTmr;
int tsInsertHeadSize; int tsInsertHeadSize;
int tscRefId = -1;
int tscNumOfThreads; int tscNumOfThreads;
...@@ -79,7 +80,7 @@ int32_t tscInitRpc(const char *user, const char *secretEncrypt, void **pDnodeCon ...@@ -79,7 +80,7 @@ int32_t tscInitRpc(const char *user, const char *secretEncrypt, void **pDnodeCon
void taos_init_imp(void) { void taos_init_imp(void) {
char temp[128]; char temp[128] = {0};
errno = TSDB_CODE_SUCCESS; errno = TSDB_CODE_SUCCESS;
srand(taosGetTimestampSec()); srand(taosGetTimestampSec());
...@@ -103,7 +104,6 @@ void taos_init_imp(void) { ...@@ -103,7 +104,6 @@ void taos_init_imp(void) {
taosReadGlobalCfg(); taosReadGlobalCfg();
taosCheckGlobalCfg(); taosCheckGlobalCfg();
taosPrintGlobalCfg();
tscDebug("starting to initialize TAOS client ..."); tscDebug("starting to initialize TAOS client ...");
tscDebug("Local End Point is:%s", tsLocalEp); tscDebug("Local End Point is:%s", tsLocalEp);
...@@ -146,29 +146,44 @@ void taos_init_imp(void) { ...@@ -146,29 +146,44 @@ void taos_init_imp(void) {
tscObjCache = taosCacheInit(TSDB_CACHE_PTR_KEY, refreshTime / 2, false, tscFreeRegisteredSqlObj, "sqlObj"); tscObjCache = taosCacheInit(TSDB_CACHE_PTR_KEY, refreshTime / 2, false, tscFreeRegisteredSqlObj, "sqlObj");
} }
tscRefId = taosOpenRef(200, tscCloseTscObj);
// in other language APIs, taos_cleanup is not available yet.
// So, to make sure taos_cleanup will be invoked to clean up the allocated
// resource to suppress the valgrind warning.
atexit(taos_cleanup);
tscDebug("client is initialized successfully"); tscDebug("client is initialized successfully");
} }
void taos_init() { pthread_once(&tscinit, taos_init_imp); } void taos_init() { pthread_once(&tscinit, taos_init_imp); }
void taos_cleanup() { // this function may be called by user or system, or by both simultaneously.
if (tscMetaCache != NULL) { void taos_cleanup(void) {
taosCacheCleanup(tscMetaCache); tscDebug("start to cleanup client environment");
tscMetaCache = NULL;
taosCacheCleanup(tscObjCache); void* m = tscMetaCache;
tscObjCache = NULL; if (m != NULL && atomic_val_compare_exchange_ptr(&tscMetaCache, m, 0) == m) {
taosCacheCleanup(m);
} }
if (tscQhandle != NULL) { m = tscObjCache;
taosCleanUpScheduler(tscQhandle); if (m != NULL && atomic_val_compare_exchange_ptr(&tscObjCache, m, 0) == m) {
tscQhandle = NULL; taosCacheCleanup(m);
}
m = tscQhandle;
if (m != NULL && atomic_val_compare_exchange_ptr(&tscQhandle, m, 0) == m) {
taosCleanUpScheduler(m);
} }
taosCloseRef(tscRefId);
taosCleanupKeywordsTable(); taosCleanupKeywordsTable();
taosCloseLog(); taosCloseLog();
taosTmrCleanUp(tscTmr); m = tscTmr;
if (m != NULL && atomic_val_compare_exchange_ptr(&tscTmr, m, 0) == m) {
taosTmrCleanUp(m);
}
} }
static int taos_options_imp(TSDB_OPTION option, const char *pStr) { static int taos_options_imp(TSDB_OPTION option, const char *pStr) {
......
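Note: the reworked taos_cleanup above claims each global with an atomic compare-and-swap before releasing it, so an explicit user call and the registered atexit handler can race without a double free. Below is a compact sketch of that claim-then-free pattern using C11 atomics; the global and the cleanup function are illustrative stand-ins, not the client's actual tscMetaCache/taosCacheCleanup.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* an illustrative global resource, analogous to tscMetaCache or tscQhandle */
static _Atomic(void *) gCache = NULL;

static void cacheCleanup(void *p) {   /* stand-in for taosCacheCleanup */
  printf("releasing cache %p\n", p);
  free(p);
}

/* may be invoked by the user and by the atexit handler, possibly concurrently */
static void cleanupOnce(void) {
  void *m = atomic_load(&gCache);
  /* only the caller that wins the swap actually frees the resource */
  if (m != NULL && atomic_compare_exchange_strong(&gCache, &m, NULL)) {
    cacheCleanup(m);
  }
}

int main(void) {
  atomic_store(&gCache, malloc(128));
  atexit(cleanupOnce);   /* mirrors the atexit(taos_cleanup) registration */
  cleanupOnce();         /* explicit call: frees the cache                */
  cleanupOnce();         /* second call: the CAS fails, nothing happens   */
  return 0;              /* the atexit handler runs and is also a no-op   */
}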
...@@ -44,13 +44,18 @@ extern int32_t tsMaxShellConns; ...@@ -44,13 +44,18 @@ extern int32_t tsMaxShellConns;
extern int32_t tsShellActivityTimer; extern int32_t tsShellActivityTimer;
extern uint32_t tsMaxTmrCtrl; extern uint32_t tsMaxTmrCtrl;
extern float tsNumOfThreadsPerCore; extern float tsNumOfThreadsPerCore;
extern float tsRatioOfQueryThreads; extern int32_t tsNumOfCommitThreads;
extern float tsRatioOfQueryThreads; // todo remove it
extern int8_t tsDaylight; extern int8_t tsDaylight;
extern char tsTimezone[]; extern char tsTimezone[];
extern char tsLocale[]; extern char tsLocale[];
extern char tsCharset[]; // default encode string extern char tsCharset[]; // default encode string
extern int32_t tsEnableCoreFile; extern int32_t tsEnableCoreFile;
extern int32_t tsCompressMsgSize; extern int32_t tsCompressMsgSize;
extern char tsTempDir[];
//query buffer management
extern int32_t tsQueryBufferSize; // maximum allowed usage buffer for each data node during query processing
// client // client
extern int32_t tsTableMetaKeepTimer; extern int32_t tsTableMetaKeepTimer;
...@@ -84,6 +89,7 @@ extern int16_t tsWAL; ...@@ -84,6 +89,7 @@ extern int16_t tsWAL;
extern int32_t tsFsyncPeriod; extern int32_t tsFsyncPeriod;
extern int32_t tsReplications; extern int32_t tsReplications;
extern int32_t tsQuorum; extern int32_t tsQuorum;
extern int32_t tsUpdate;
// balance // balance
extern int32_t tsEnableBalance; extern int32_t tsEnableBalance;
...@@ -180,13 +186,13 @@ extern int32_t debugFlag; ...@@ -180,13 +186,13 @@ extern int32_t debugFlag;
#define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize) #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize)
void taosInitGlobalCfg(); void taosInitGlobalCfg();
bool taosCheckGlobalCfg(); int32_t taosCheckGlobalCfg();
void taosSetAllDebugFlag(); void taosSetAllDebugFlag();
bool taosCfgDynamicOptions(char *msg); bool taosCfgDynamicOptions(char *msg);
int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port); int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port);
bool taosCheckBalanceCfgOptions(const char *option, int32_t *vnodeId, int32_t *dnodeId); bool taosCheckBalanceCfgOptions(const char *option, int32_t *vnodeId, int32_t *dnodeId);
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif
......
Subproject commit 8d7bf743852897110cbdcc7c4322cd7a74d4167b Subproject commit 8c58c512b6acda8bcdfa48fdc7140227b5221766
Subproject commit b62a26ecc164a310104df57691691b237e091c89
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
<groupId>com.taosdata.jdbc</groupId> <groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId> <artifactId>taos-jdbcdriver</artifactId>
<version>2.0.6</version> <version>2.0.9</version>
<packaging>jar</packaging> <packaging>jar</packaging>
<name>JDBCDriver</name> <name>JDBCDriver</name>
......
...@@ -52,7 +52,6 @@ public class TSDBConnection implements Connection { ...@@ -52,7 +52,6 @@ public class TSDBConnection implements Connection {
public TSDBConnection(Properties info, TSDBDatabaseMetaData meta) throws SQLException { public TSDBConnection(Properties info, TSDBDatabaseMetaData meta) throws SQLException {
this.dbMetaData = meta; this.dbMetaData = meta;
connect(info.getProperty(TSDBDriver.PROPERTY_KEY_HOST), connect(info.getProperty(TSDBDriver.PROPERTY_KEY_HOST),
Integer.parseInt(info.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "0")), Integer.parseInt(info.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "0")),
info.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME), info.getProperty(TSDBDriver.PROPERTY_KEY_USER), info.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME), info.getProperty(TSDBDriver.PROPERTY_KEY_USER),
...@@ -197,12 +196,13 @@ public class TSDBConnection implements Connection { ...@@ -197,12 +196,13 @@ public class TSDBConnection implements Connection {
} }
public SQLWarning getWarnings() throws SQLException { public SQLWarning getWarnings() throws SQLException {
//todo: implement getWarnings according to the warning messages returned from TDengine
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
} }
public void clearWarnings() throws SQLException { public void clearWarnings() throws SQLException {
// left blank to support HikariCP connection // left blank to support HikariCP connection
//todo: implement getWarnings according to the warning messages returned from TDengine //todo: implement clearWarnings according to the warning messages returned from TDengine
} }
public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException { public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException {
......
...@@ -96,7 +96,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData { ...@@ -96,7 +96,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
} }
public int getDriverMajorVersion() { public int getDriverMajorVersion() {
return 0; return 2;
} }
public int getDriverMinorVersion() { public int getDriverMinorVersion() {
......
...@@ -24,7 +24,7 @@ ...@@ -24,7 +24,7 @@
int64_t ver = 0; int64_t ver = 0;
void *pCq = NULL; void *pCq = NULL;
int writeToQueue(void *pVnode, void *data, int type) { int writeToQueue(void *pVnode, void *data, int type, void *pMsg) {
return 0; return 0;
} }
......
...@@ -78,7 +78,6 @@ int32_t qKillQuery(qinfo_t qinfo); ...@@ -78,7 +78,6 @@ int32_t qKillQuery(qinfo_t qinfo);
int32_t qQueryCompleted(qinfo_t qinfo); int32_t qQueryCompleted(qinfo_t qinfo);
/** /**
* destroy query info structure * destroy query info structure
* @param qHandle * @param qHandle
......
(The diffs for the remaining changed files are collapsed in this view.)