Commit de1a75f6 authored by Hongze Cheng

Merge branch 'develop' into feature/TD-1413

...@@ -4,3 +4,9 @@
[submodule "src/connector/grafanaplugin"]
path = src/connector/grafanaplugin
url = https://github.com/taosdata/grafanaplugin
[submodule "tests/examples/rust"]
path = tests/examples/rust
url = https://github.com/songtianyi/tdengine-rust-bindings.git
[submodule "src/connector/hivemq-tdengine-extension"]
path = src/connector/hivemq-tdengine-extension
url = https://github.com/huskar-t/hivemq-tdengine-extension.git
\ No newline at end of file
...@@ -27,6 +27,7 @@ pipeline {
cd debug
cmake .. > /dev/null
make > /dev/null
make install > /dev/null
cd ${WKC}/tests
#./test-all.sh smoke
./test-all.sh pytest
...@@ -79,7 +80,20 @@ pipeline {
cmake .. > /dev/null
make > /dev/null
cd ${WKC}/tests/pytest
'''
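// catchError marks this stage FAILURE if the wrapped step fails, but keeps buildResult SUCCESS so the remaining stages still run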
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
./crash_gen.sh -a -p -t 4 -s 2000
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
./handle_crash_gen_val_log.sh
'''
}
sh '''
date
cd ${WKC}/tests
./test-all.sh b2
...@@ -124,20 +138,116 @@ pipeline {
sh'''
cd ${WORKSPACE}
git checkout develop
cd tests/gotest
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WORKSPACE}/tests/gotest
bash batchtest.sh
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WORKSPACE}/tests/examples/python/PYTHONConnectorChecker
python3 PythonChecker.py
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WORKSPACE}/tests/examples/JDBC/JDBCDemo/
mvn clean package assembly:single >/dev/null
java -jar target/jdbcChecker-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
cd ${WORKSPACE}/tests/examples/python/PYTHONConnectorChecker
python3 PythonChecker.py
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${JENKINS_HOME}/workspace/C#NET/src/CheckC#
dotnet run
'''
}
}
}
}
}
}
post {
success {
emailext (
subject: "SUCCESSFUL: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
body: '''<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
<tr>
<td><br />
<b><font color="#0B610B"><font size="6">构建信息</font></font></b>
<hr size="2" width="100%" align="center" /></td>
</tr>
<tr>
<td>
<ul>
<div style="font-size:18px">
<li>构建名称>>分支:${PROJECT_NAME}</li>
<li>构建结果:<span style="color:green"> Successful </span></li>
<li>构建编号:${BUILD_NUMBER}</li>
<li>触发用户:${CAUSE}</li>
<li>变更概要:${CHANGES}</li>
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
<li>变更集:${JELLY_SCRIPT}</li>
</div>
</ul>
</td>
</tr>
</table></font>
</body>
</html>''',
to: "yqliu@taosdata.com,pxiao@taosdata.com",
from: "support@taosdata.com"
)
}
failure {
emailext (
subject: "FAILED: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
body: '''<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
<tr>
<td><br />
<b><font color="#0B610B"><font size="6">构建信息</font></font></b>
<hr size="2" width="100%" align="center" /></td>
</tr>
<tr>
<td>
<ul>
<div style="font-size:18px">
<li>构建名称>>分支:${PROJECT_NAME}</li>
<li>构建结果:<span style="color:green"> Successful </span></li>
<li>构建编号:${BUILD_NUMBER}</li>
<li>触发用户:${CAUSE}</li>
<li>变更概要:${CHANGES}</li>
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
<li>变更集:${JELLY_SCRIPT}</li>
</div>
</ul>
</td>
</tr>
</table></font>
</body>
</html>''',
to: "yqliu@taosdata.com,pxiao@taosdata.com",
from: "support@taosdata.com"
)
}
}
}
\ No newline at end of file
...@@ -13,6 +13,7 @@ ELSEIF (TD_WINDOWS)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/go DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/nodejs DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/python DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/C\# DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/tests/examples DESTINATION .)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/packaging/cfg DESTINATION .)
INSTALL(FILES ${TD_COMMUNITY_DIR}/src/inc/taos.h DESTINATION include)
......
...@@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "2.0.6.0")
SET(TD_VER_NUMBER "2.0.7.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
......
...@@ -87,6 +87,7 @@ TDengine系统后台服务由taosd提供,可以在配置文件taos.cfg里修
- httpPort: RESTful服务使用的端口号,所有的HTTP请求(TCP)都需要向该接口发起查询/写入请求。
- dataDir: 数据文件目录,所有的数据文件都将写入该目录。默认值:/var/lib/taos。
- logDir:日志文件目录,客户端和服务器的运行日志文件将写入该目录。默认值:/var/log/taos。
- tempDir:临时文件目录,客户端和服务器的临时文件(主要是查询时用于保存中间结果)将写入该目录。默认值:Linux下为 /tmp/,Windows下为环境变量 tmp 或 temp 指向的目录。
- arbitrator:系统中裁决器的end point, 缺省值为空。
- role:dnode的可选角色。0-any; 既可作为mnode,也可分配vnode;1-mgmt;只能作为mnode,不能分配vnode;2-dnode;不能作为mnode,只能分配vnode
- debugFlag:运行日志开关。131(输出错误和警告日志),135( 输出错误、警告和调试日志),143( 输出错误、警告、调试和跟踪日志)。默认值:131或135(不同模块有不同的默认值)。
......
...@@ -34,7 +34,8 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
- [SQL写入](https://www.taosdata.com/cn/documentation20/insert/#SQL写入):使用SQL insert命令向一张或多张表写入单条或多条记录
- [Telegraf写入](https://www.taosdata.com/cn/documentation20/insert/#Telegraf直接写入):配置Telegraf, 不用任何代码,将采集数据直接写入
- [Prometheus写入](https://www.taosdata.com/cn/documentation20/insert/#Prometheus直接写入):配置Prometheus, 不用任何代码,将数据直接写入
- [EMQ X Broker](https://www.taosdata.com/cn/documentation20/insert/#EMQ-X-Broker直接写入):配置EMQ X,不用任何代码,就可将 MQTT 数据直接写入
- [HiveMQ Broker](https://www.taosdata.com/cn/documentation20/insert/#HiveMQ-Broker直接写入):通过 HiveMQ Extension,不用任何代码,就可将 MQTT 数据直接写入
## [高效查询数据](https://www.taosdata.com/cn/documentation20/queries)
......
...@@ -10,7 +10,7 @@ TDengine的模块之一是时序数据库。但除此之外,为减少研发的
* __硬件或云服务成本降至1/5__:由于超强性能,计算资源不到通用大数据方案的1/5;通过列式存储和先进的压缩算法,存储空间不到通用数据库的1/10。
* __全栈时序数据处理引擎__:将数据库、消息队列、缓存、流式计算等功能融为一体,应用无需再集成Kafka/Redis/HBase/Spark/HDFS等软件,大幅降低应用开发和维护的复杂度成本。
* __强大的分析功能__:无论是十年前还是一秒钟前的数据,指定时间范围即可查询。数据可在时间轴上或多个设备上进行聚合。即席查询可通过Shell, Python, R, Matlab随时进行。
* __与第三方工具无缝连接__:不用一行代码,即可与Telegraf, Grafana, EMQ, Prometheus, Matlab, R等集成。后续将支持OPC, Hadoop, Spark等, BI工具也将无缝连接。
* __与第三方工具无缝连接__:不用一行代码,即可与Telegraf, Grafana, EMQ, HiveMQ, Prometheus, Matlab, R等集成。后续将支持OPC, Hadoop, Spark等, BI工具也将无缝连接。
* __零运维成本、零学习成本__:安装集群简单快捷,无需分库分表,实时备份。类似标准SQL,支持RESTful, 支持Python/Java/C/C++/C#/Go/Node.js, 与MySQL相似,零学习成本。
采用TDengine,可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。但需要指出的是,因充分利用了物联网时序数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM等通用型数据。
......
...@@ -90,7 +90,7 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
```mysql
ALTER DATABASE db_name REPLICA 2;
```
REPLICA参数是指修改数据库副本数,取值范围[1, 3]。在集群中使用,副本数必须小于dnode的数目。
REPLICA参数是指修改数据库副本数,取值范围[1, 3]。在集群中使用,副本数必须小于或等于dnode的数目。
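A minimal illustration (the database name `power` is hypothetical): the replica count can only be raised up to the number of dnodes the cluster reports.

```mysql
-- check how many dnodes the cluster has, then raise the replica count up to that number
SHOW DNODES;
ALTER DATABASE power REPLICA 3;
```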
```mysql
ALTER DATABASE db_name KEEP 365;
...@@ -844,7 +844,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
- **PERCENTILE**
```mysql
SELECT PERCENTILE(field_name, P) FROM { tb_name | stb_name } [WHERE clause];
SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause];
```
功能说明:统计表中某列的值百分比分位数。
返回结果数据类型: 双精度浮点数Double。
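A hypothetical usage sketch (table `d1001` and its `current` column follow the smart-meter example used elsewhere in these docs; P must lie in [0, 100]):

```mysql
-- 90th percentile of the current column in one sub-table
SELECT PERCENTILE(current, 90) FROM d1001;
```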
...@@ -1016,9 +1016,9 @@ SELECT AVG(current),MAX(current),LEASTSQUARES(current, start_val, step_val), PER
```
## TAOS SQL 边界限制
- 数据库名最大长度为33
- 表名最大长度为193,每行数据最大长度16k个字符
- 列名最大长度为65,最多允许1024列,最少需要2列,第一列必须是时间戳
- 数据库名最大长度为32
- 表名最大长度为192,每行数据最大长度16k个字符
- 列名最大长度为64,最多允许1024列,最少需要2列,第一列必须是时间戳
- 标签最多允许128个,可以0个,标签总长度不超过16k个字符
- SQL语句最大长度65480个字符,但可通过系统配置参数maxSQLLength修改,最长可配置为1M
- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制
...@@ -35,7 +35,7 @@ TDengine相对于通用数据库,有超高的压缩比,在绝大多数场景
Raw DataSize = numOfTables * rowSizePerTable * rowsPerTable
```
示例:1000万台智能电表,每台电表每15分钟采集一次数据,每次采集的数据128字节,那么一年的原始数据量是:10000000\*128\*24\*60/15*365 = 44851T。TDengine大概需要消耗44851/5=8970T, 8.9P空间。
示例:1000万台智能电表,每台电表每15分钟采集一次数据,每次采集的数据128字节,那么一年的原始数据量是:10000000\*128\*24\*60/15*365 = 44.8512T。TDengine大概需要消耗44.851/5=8.97024T空间。
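Restating the arithmetic of the corrected example (with 1 T taken as $10^{12}$ bytes):

$$
10^{7} \times 128\,\text{B} \times \tfrac{24 \times 60}{15}\,\tfrac{\text{rows}}{\text{day}} \times 365\,\text{days} = 4.48512 \times 10^{13}\,\text{B} \approx 44.85\,\text{T}, \qquad 44.85 / 5 \approx 8.97\,\text{T}
$$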
用户可以通过参数keep,设置数据在磁盘中的最大保存时长。为进一步减少存储成本,TDengine还提供多级存储,最冷的数据可以存放在最廉价的存储介质上,应用的访问不用做任何调整,只是读取速度降低了。
...@@ -253,7 +253,7 @@ ALTER USER <user_name> PASS <'password'>;
修改用户密码, 为避免被转换为小写,密码需要用单引号引用,单引号为英文半角
```
ALTER USER <user_name> PRIVILEDGE <super|write|read>;
ALTER USER <user_name> PRIVILEGE <super|write|read>;
```
修改用户权限为:super/write/read,不需要添加单引号
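A hedged example (the user name `monitor` and the password are hypothetical): the password is single-quoted so its case is preserved, while the privilege value is written without quotes.

```mysql
ALTER USER monitor PASS 'Abc123!';
ALTER USER monitor PRIVILEGE read;
```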
......
...@@ -4,16 +4,99 @@
### 物联网典型场景
在典型的物联网、车联网、运维监测场景中,往往有多种不同类型的数据采集设备,采集一个到多个不同的物理量。而同一种采集设备类型,往往又有多个具体的采集设备分布在不同的地点。大数据处理系统就是要将各种采集的数据汇总,然后进行计算和分析。对于同一类设备,其采集的数据都是很规则的。以智能电表为例,假设每个智能电表采集电流、电压、相位三个量,其采集的数据类似如下的表格:

| Device ID | Time Stamp | current | voltage | phase | location | groupId |
| :-------: | :-----------: | :-----: | :-----: | :---: | :--------------: | :-----: |
| d1001 | 1538548685000 | 10.3 | 219 | 0.31 | Beijing.Chaoyang | 2 |
| d1002 | 1538548684000 | 10.2 | 220 | 0.23 | Beijing.Chaoyang | 3 |
| d1003 | 1538548686500 | 11.5 | 221 | 0.35 | Beijing.Haidian | 3 |
| d1004 | 1538548685500 | 13.4 | 223 | 0.29 | Beijing.Haidian | 2 |
| d1001 | 1538548695000 | 12.6 | 218 | 0.33 | Beijing.Chaoyang | 2 |
| d1004 | 1538548696600 | 11.8 | 221 | 0.28 | Beijing.Haidian | 2 |
| d1002 | 1538548696650 | 10.3 | 218 | 0.25 | Beijing.Chaoyang | 3 |
| d1001 | 1538548696800 | 12.3 | 221 | 0.31 | Beijing.Chaoyang | 2 |

<figure><table>
<thead><tr>
<th style="text-align:center;">设备ID</th>
<th style="text-align:center;">时间戳</th>
<th style="text-align:center;" colspan="3">采集量</th>
<th style="text-align:center;" colspan="2">标签</th>
</tr>

<tr>
<th style="text-align:center;">Device ID</th>
<th style="text-align:center;">Time Stamp</th>
<th style="text-align:center;">current</th>
<th style="text-align:center;">voltage</th>
<th style="text-align:center;">phase</th>
<th style="text-align:center;">location</th>
<th style="text-align:center;">groupId</th>
</tr>
</thead>
<tbody>
<tr>
<td style="text-align:center;">d1001</td>
<td style="text-align:center;">1538548685000</td>
<td style="text-align:center;">10.3</td>
<td style="text-align:center;">219</td>
<td style="text-align:center;">0.31</td>
<td style="text-align:center;">Beijing.Chaoyang</td>
<td style="text-align:center;">2</td>
</tr>
<tr>
<td style="text-align:center;">d1002</td>
<td style="text-align:center;">1538548684000</td>
<td style="text-align:center;">10.2</td>
<td style="text-align:center;">220</td>
<td style="text-align:center;">0.23</td>
<td style="text-align:center;">Beijing.Chaoyang</td>
<td style="text-align:center;">3</td>
</tr>
<tr>
<td style="text-align:center;">d1003</td>
<td style="text-align:center;">1538548686500</td>
<td style="text-align:center;">11.5</td>
<td style="text-align:center;">221</td>
<td style="text-align:center;">0.35</td>
<td style="text-align:center;">Beijing.Haidian</td>
<td style="text-align:center;">3</td>
</tr>
<tr>
<td style="text-align:center;">d1004</td>
<td style="text-align:center;">1538548685500</td>
<td style="text-align:center;">13.4</td>
<td style="text-align:center;">223</td>
<td style="text-align:center;">0.29</td>
<td style="text-align:center;">Beijing.Haidian</td>
<td style="text-align:center;">2</td>
</tr>
<tr>
<td style="text-align:center;">d1001</td>
<td style="text-align:center;">1538548695000</td>
<td style="text-align:center;">12.6</td>
<td style="text-align:center;">218</td>
<td style="text-align:center;">0.33</td>
<td style="text-align:center;">Beijing.Chaoyang</td>
<td style="text-align:center;">2</td>
</tr>
<tr>
<td style="text-align:center;">d1004</td>
<td style="text-align:center;">1538548696600</td>
<td style="text-align:center;">11.8</td>
<td style="text-align:center;">221</td>
<td style="text-align:center;">0.28</td>
<td style="text-align:center;">Beijing.Haidian</td>
<td style="text-align:center;">2</td>
</tr>
<tr>
<td style="text-align:center;">d1002</td>
<td style="text-align:center;">1538548696650</td>
<td style="text-align:center;">10.3</td>
<td style="text-align:center;">218</td>
<td style="text-align:center;">0.25</td>
<td style="text-align:center;">Beijing.Chaoyang</td>
<td style="text-align:center;">3</td>
</tr>
<tr>
<td style="text-align:center;">d1001</td>
<td style="text-align:center;">1538548696800</td>
<td style="text-align:center;">12.3</td>
<td style="text-align:center;">221</td>
<td style="text-align:center;">0.31</td>
<td style="text-align:center;">Beijing.Chaoyang</td>
<td style="text-align:center;">2</td>
</tr>
</tbody>
</table></figure>
<center> 表1:智能电表数据示例</center>
...@@ -221,7 +304,7 @@ TDengine采用时间驱动缓存管理策略(First-In-First-Out,FIFO),
TDengine通过查询函数向用户提供毫秒级的数据获取能力。直接将最近到达的数据保存在缓存中,可以更加快速地响应用户针对最近一条或一批数据的查询分析,整体上提供更快的数据库查询响应能力。从这个意义上来说,**可通过设置合适的配置参数将TDengine作为数据缓存来使用,而不需要再部署Redis或其他额外的缓存系统**,可有效地简化系统架构,降低运维的成本。需要注意的是,TDengine重启以后系统的缓存将被清空,之前缓存的数据均会被批量写入磁盘,缓存的数据将不会像专门的Key-value缓存系统再将之前缓存的数据重新加载到缓存中。
每个vnode有自己独立的内存,而且由多个固定大小的内存块组成,不同vnode之间完全隔离。数据写入时,类似于日志的写法,数据被顺序追加写入内存,但每个vnode维护有自己的skip list,便于迅速查找。当一半以上的内存块写满时,启动落盘操作,而且后续写的操作在新的内存块进行。这样,一个vnode里有一半内存块是保留有最近的数据的,以达到缓存、快速查找的目的。一个vnode的内存块的个数由配置参数blocks决定,内存块的大小由配置参数cache决定。
每个vnode有自己独立的内存,而且由多个固定大小的内存块组成,不同vnode之间完全隔离。数据写入时,类似于日志的写法,数据被顺序追加写入内存,但每个vnode维护有自己的skip list,便于迅速查找。当三分之一以上的内存块写满时,启动落盘操作,而且后续写的操作在新的内存块进行。这样,一个vnode里有三分之一内存块是保留有最近的数据的,以达到缓存、快速查找的目的。一个vnode的内存块的个数由配置参数blocks决定,内存块的大小由配置参数cache决定。
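As a rough sketch implied by the parameters just described (ignoring per-block overhead), the write cache available to a single vnode is

$$ \text{vnode write cache} \approx \text{cache} \times \text{blocks} $$

and a flush to disk is triggered once roughly one third of those blocks are full.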
### 持久化存储
TDengine采用数据驱动的方式让缓存中的数据写入硬盘进行持久化存储。当vnode中缓存的数据达到一定规模时,为了不阻塞后续数据的写入,TDengine也会拉起落盘线程将缓存的数据写入持久化存储。TDengine在数据落盘时会打开新的数据库日志文件,在落盘成功后则会删除老的数据库日志文件,避免日志文件无限制的增长。
......
...@@ -616,6 +616,43 @@ HTTP请求URL采用`sqlutc`时,返回结果集的时间戳将采用UTC时间
- httpEnableCompress: 是否支持压缩,默认不支持,目前TDengine仅支持gzip压缩格式
- httpDebugFlag: 日志开关,131:仅错误和报警信息,135:调试信息,143:非常详细的调试信息,默认131
## CSharp Connector
在Windows系统上,C#应用程序可以使用TDengine的原生C接口来执行所有数据库操作,后续版本将提供ORM(dapper)框架驱动。
#### 安装TDengine客户端
C#连接器需要使用`libtaos.so`和`taos.h`。因此,在使用C#连接器之前,需在程序运行的Windows环境安装TDengine的Windows客户端,以便获得相关驱动文件。
安装完成后,在文件夹`C:/TDengine/examples/C#`中,将会看到两个文件
- TDengineDriver.cs 调用taos.dll文件的Native C方法
- TDengineTest.cs 参考程序示例
在文件夹`C:\Windows\System32`,将会看到`taos.dll`文件
#### 使用方法
- 将C#接口文件TDengineDriver.cs加入到应用程序所在.NET项目中
- 参考TDengineTest.cs来定义数据库连接参数,及执行数据插入、查询等操作的方法
- 因为C#接口需要用到`taos.dll`文件,用户可以将`taos.dll`文件加入.NET解决方案中
#### 注意事项
- `taos.dll`文件使用x64平台编译,所以.NET项目在生成.exe文件时,“解决方案”/“项目”的“平台”请均选择“x64”。
- 此.NET接口目前已经在Visual Studio 2013/2015/2017中验证过,其它VS版本尚待验证。
#### 第三方驱动
Maikebing.Data.Taos是一个TDengine的ADO.Net提供器,支持linux,windows。该开发包由热心贡献者`麦壳饼@@maikebing`提供,具体请参考
```
//接口下载
https://github.com/maikebing/Maikebing.EntityFrameworkCore.Taos
//用法说明
https://www.taosdata.com/blog/2020/11/02/1901.html
```
## Go Connector
......
...@@ -38,9 +38,9 @@
6. 检查防火墙设置,确认TCP/UDP 端口6030-6042 是打开的
7. 对于Linux上的JDBC(ODBC, Python, Go等接口类似)连接, 确保*libtaos.so*在目录*/usr/local/lib/taos*里, 并且*/usr/local/lib/taos*在系统库函数搜索路径*LD_LIBRARY_PATH*
7. 对于Linux上的JDBC(ODBC, Python, Go等接口类似)连接, 确保*libtaos.so*在目录*/usr/local/taos/driver*里, 并且*/usr/local/taos/driver*在系统库函数搜索路径*LD_LIBRARY_PATH*
8. 对于windows上的JDBC, ODBC, Python, Go等连接,确保*driver/c/taos.dll*在你的系统搜索目录里 (建议*taos.dll*放在目录 *C:\Windows\System32*)
8. 对于windows上的JDBC, ODBC, Python, Go等连接,确保*C:\TDengine\driver\taos.dll*在你的系统库函数搜索目录里 (建议*taos.dll*放在目录 *C:\Windows\System32*)
9. 如果仍不能排除连接故障,请使用命令行工具nc来分别判断指定端口的TCP和UDP连接是否通畅
检查UDP端口连接是否工作:`nc -vuz {hostIP} {port} `
......
# 高效写入数据
TDengine支持多种接口写入数据,包括SQL, Prometheus, Telegraf, EMQ MQTT Broker, CSV文件等,后续还将提供Kafka, OPC等接口。数据可以单条插入,也可以批量插入,可以插入一个数据采集点的数据,也可以同时插入多个数据采集点的数据。支持多线程插入,支持时间乱序数据插入,也支持历史数据插入。
TDengine支持多种接口写入数据,包括SQL, Prometheus, Telegraf, EMQ MQTT Broker, HiveMQ Broker, CSV文件等,后续还将提供Kafka, OPC等接口。数据可以单条插入,也可以批量插入,可以插入一个数据采集点的数据,也可以同时插入多个数据采集点的数据。支持多线程插入,支持时间乱序数据插入,也支持历史数据插入。
## SQL写入
...@@ -218,7 +218,15 @@ use telegraf;
select * from cpu;
```
## EMQ X Broker直接写入
MQTT是一流行的物联网数据传输协议,[EMQ](https://github.com/emqx/emqx)是一开源的MQTT Broker软件,无需任何代码,只需要在EMQ Dashboard里使用“规则”做简单配置,即可将MQTT的数据直接写入TDengine。EMQ X 支持通过 发送到 Web 服务 的方式保存数据到 TDEngine,也在企业版上提供原生的 TDEngine 驱动实现直接保存。详细使用方法请参考 [EMQ 官方文档](https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine)
MQTT是一流行的物联网数据传输协议,TDengine 可以很方便的接入 MQTT Broker 接受的数据并写入到 TDengine。
## EMQ Broker 直接写入
[EMQ](https://github.com/emqx/emqx)是一开源的MQTT Broker软件,无需任何代码,只需要在EMQ Dashboard里使用“规则”做简单配置,即可将MQTT的数据直接写入TDengine。EMQ X 支持通过 发送到 Web 服务 的方式保存数据到 TDengine,也在企业版上提供原生的 TDengine 驱动实现直接保存。详细使用方法请参考 [EMQ 官方文档](https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine)
## HiveMQ Broker 直接写入
[HiveMQ](https://www.hivemq.com/) 是一个提供免费个人版和企业版的 MQTT 代理,主要用于企业和新兴的机器到机器M2M通讯和内部传输,满足可伸缩性、易管理和安全特性。HiveMQ 提供了开源的插件开发包。可以通过 HiveMQ extension - TDengine 保存数据到 TDengine。详细使用方法请参考 [HiveMQ extension - TDengine 说明文档](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README.md)
...@@ -20,6 +20,9 @@
# data file's directory
# dataDir /var/lib/taos
# temporary file's directory
# tempDir /tmp/
# the arbitrator's fully qualified domain name (FQDN) for TDengine system, for cluster only
# arbitrator arbitrator_hostname:6042
...@@ -256,3 +259,5 @@
# maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden
# maxBinaryDisplayWidth 30
# enable/disable telemetry reporting
# telemetryReporting 1
\ No newline at end of file
...@@ -48,6 +48,7 @@ cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_pat
cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/tools/preun.sh ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/build/bin/taosdemo ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosdump ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver
...@@ -58,7 +59,7 @@ cp -r ${top_dir}/src/connector/grafanaplugin ${pkg_dir}${install_home_pat
cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector
cp ${compile_dir}/build/lib/taos-jdbcdriver*dist.* ${pkg_dir}${install_home_path}/connector
cp ${compile_dir}/build/lib/taos-jdbcdriver*dist.* ${pkg_dir}${install_home_path}/connector ||:
cp -r ${compile_dir}/../packaging/deb/DEBIAN ${pkg_dir}/
chmod 755 ${pkg_dir}/DEBIAN/*
......
...@@ -156,9 +156,15 @@ build_time=$(date +"%F %R")
# get commint id from git
gitinfo=$(git rev-parse --verify HEAD)
enterprise_dir="${top_dir}/../enterprise"
cd ${enterprise_dir}
gitinfoOfInternal=$(git rev-parse --verify HEAD)
if [[ "$verMode" == "cluster" ]]; then
enterprise_dir="${top_dir}/../enterprise"
cd ${enterprise_dir}
gitinfoOfInternal=$(git rev-parse --verify HEAD)
else
gitinfoOfInternal=NULL
fi
cd ${curr_dir}
# 2. cmake executable file
...@@ -193,6 +199,9 @@ cd ${curr_dir}
# 3. Call the corresponding script for packaging
if [ "$osType" != "Darwin" ]; then
if [[ "$verMode" != "cluster" ]] && [[ "$cpuType" == "x64" ]] && [[ "$dbName" == "taos" ]]; then
ret='0'
command -v dpkg >/dev/null 2>&1 || { ret='1'; }
if [ "$ret" -eq 0 ]; then
echo "====do deb package for the ubuntu system====" echo "====do deb package for the ubuntu system===="
output_dir="${top_dir}/debs" output_dir="${top_dir}/debs"
if [ -d ${output_dir} ]; then if [ -d ${output_dir} ]; then
...@@ -201,7 +210,13 @@ if [ "$osType" != "Darwin" ]; then ...@@ -201,7 +210,13 @@ if [ "$osType" != "Darwin" ]; then
${csudo} mkdir -p ${output_dir} ${csudo} mkdir -p ${output_dir}
cd ${script_dir}/deb cd ${script_dir}/deb
${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType} ${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
else
echo "==========dpkg command not exist, so not release deb package!!!"
fi
ret='0'
command -v rpmbuild >/dev/null 2>&1 || { ret='1'; }
if [ "$ret" -eq 0 ]; then
echo "====do rpm package for the centos system====" echo "====do rpm package for the centos system===="
output_dir="${top_dir}/rpms" output_dir="${top_dir}/rpms"
if [ -d ${output_dir} ]; then if [ -d ${output_dir} ]; then
...@@ -210,6 +225,9 @@ if [ "$osType" != "Darwin" ]; then ...@@ -210,6 +225,9 @@ if [ "$osType" != "Darwin" ]; then
${csudo} mkdir -p ${output_dir} ${csudo} mkdir -p ${output_dir}
cd ${script_dir}/rpm cd ${script_dir}/rpm
${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType} ${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
else
echo "==========rpmbuild command not exist, so not release rpm package!!!"
fi
fi
echo "====do tar.gz package for all systems===="
......
...@@ -2,7 +2,7 @@
%define cfg_install_dir /etc/taos
%define __strip /bin/true
Name: TDengine
Name: tdengine
Version: %{_version}
Release: 3%{?dist}
Summary: tdengine from taosdata
...@@ -58,6 +58,7 @@ cp %{_compiledir}/../packaging/tools/preun.sh %{buildroot}%{homepath}/scri
cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosdemo %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include
...@@ -65,7 +66,7 @@ cp -r %{_compiledir}/../src/connector/grafanaplugin %{buildroot}%{homepath}/conn
cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector
cp %{_compiledir}/build/lib/taos-jdbcdriver*dist.* %{buildroot}%{homepath}/connector
cp %{_compiledir}/build/lib/taos-jdbcdriver*dist.* %{buildroot}%{homepath}/connector ||:
cp -r %{_compiledir}/../tests/examples/* %{buildroot}%{homepath}/examples
#Scripts executed before installation
...@@ -134,6 +135,7 @@ if [ $1 -eq 0 ];then
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
#${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${cfg_link_dir}/* || :
${csudo} rm -f ${inc_link_dir}/taos.h || :
${csudo} rm -f ${inc_link_dir}/taoserror.h || :
......
#!/bin/bash
#
log_dir=$1
result_file=$2
if [ ! -n "$1" ];then
echo "Pleas input the director of taosdlog."
echo "usage: ./get_client.sh <taosdlog directory> <result file>"
exit 1
else
log_dir=$1
fi
if [ ! -n "$2" ];then
result_file=clientInfo.txt
else
result_file=$2
fi
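# summarize the distinct client endpoints found in "new TCP connection" entries of the taosd log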
grep "new TCP connection" ${log_dir}/taosdlog.* | sed -e "s/0x.* from / /"|sed -e "s/,.*$//"|sed -e "s/:[0-9]*$//"|sort -r|uniq -f 2|sort -k 3 -r|uniq -f 2 > ${result_file}
...@@ -278,11 +278,11 @@ function install_service_on_sysvinit() {
# Install taosd service
if ((${os_type}==1)); then
${csudo} cp -f ${script_dir}/../deb/init.d/taosd ${install_main_dir}/init.d
${csudo} cp ${script_dir}/../deb/init.d/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd
${csudo} cp -f ${script_dir}/../deb/taosd ${install_main_dir}/init.d
${csudo} cp ${script_dir}/../deb/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd
elif ((${os_type}==2)); then
${csudo} cp -f ${script_dir}/../rpm/init.d/taosd ${install_main_dir}/init.d
${csudo} cp ${script_dir}/../rpm/init.d/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd
${csudo} cp -f ${script_dir}/../rpm/taosd ${install_main_dir}/init.d
${csudo} cp ${script_dir}/../rpm/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd
fi
#restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
......
...@@ -45,7 +45,7 @@ if [ "$osType" != "Darwin" ]; then
strip ${build_dir}/bin/taos
bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh"
else
bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${script_dir}/remove_client.sh ${script_dir}/set_core.sh"
bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${script_dir}/remove_client.sh ${script_dir}/set_core.sh ${script_dir}/get_client.sh"
fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
else
...@@ -110,7 +110,7 @@ mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
cp -r ${connector_dir}/python ${install_dir}/connector/
......
...@@ -77,7 +77,9 @@ if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/bin/taos ${install_dir}/bin/power
cp ${script_dir}/remove_power.sh ${install_dir}/bin
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo
cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump
cp ${script_dir}/set_core.sh ${install_dir}/bin
cp ${script_dir}/get_client.sh ${install_dir}/bin
fi
else
cp ${bin_files} ${install_dir}/bin
...@@ -135,7 +137,7 @@ mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
cp -r ${connector_dir}/python ${install_dir}/connector/
......
...@@ -36,7 +36,7 @@ if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/taos
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh"
else
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove.sh ${script_dir}/set_core.sh"
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove.sh ${script_dir}/set_core.sh ${script_dir}/get_client.sh"
fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
...@@ -124,7 +124,7 @@ cp ${lib_files} ${install_dir}/driver
connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
cp -r ${connector_dir}/python ${install_dir}/connector/
cp -r ${connector_dir}/go ${install_dir}/connector
......
...@@ -77,8 +77,10 @@ else
cp ${build_dir}/bin/taosd ${install_dir}/bin/powerd
cp ${script_dir}/remove_power.sh ${install_dir}/bin
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo
cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump
cp ${build_dir}/bin/tarbitrator ${install_dir}/bin
cp ${script_dir}/set_core.sh ${install_dir}/bin
cp ${script_dir}/get_client.sh ${install_dir}/bin
fi
chmod a+x ${install_dir}/bin/* || :
...@@ -156,7 +158,7 @@ cp ${lib_files} ${install_dir}/driver
connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
cp -r ${connector_dir}/python ${install_dir}/connector/
cp -r ${connector_dir}/go ${install_dir}/connector
......
...@@ -81,8 +81,10 @@ function install_lib() {
${csudo} ln -s ${lib_dir}/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
${csudo} ln -s ${lib_dir}/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
fi
}
function install_bin() {
...@@ -121,7 +123,10 @@ function install_config() {
echo -e -n "${GREEN}Enter FQDN:port (like h1.taosdata.com:6030) of an existing TDengine cluster node to join${NC}"
echo
echo -e -n "${GREEN}OR leave it blank to build one${NC}:"
read firstEp
#read firstEp
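# read from the controlling terminal so the prompt still works when the script itself is piped in (e.g. curl | bash); skip silently if no tty is available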
if exec < /dev/tty; then
read firstEp;
fi
while true; do
if [ ! -z "$firstEp" ]; then
# check the format of the firstEp
......
name: tdengine
base: core18
version: '2.0.5.1'
version: '2.0.7.0'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
...@@ -72,7 +72,7 @@ parts:
- usr/bin/taosd
- usr/bin/taos
- usr/bin/taosdemo
- usr/lib/libtaos.so.2.0.5.1
- usr/lib/libtaos.so.2.0.6.0
- usr/lib/libtaos.so.1
- usr/lib/libtaos.so
......
...@@ -490,7 +490,7 @@ static bool balanceMontiorDropping() {
if (pDnode->status == TAOS_DN_STATUS_OFFLINE) {
if (pDnode->lastAccess + tsOfflineThreshold > tsAccessSquence) continue;
if (strcmp(pDnode->dnodeEp, dnodeGetMnodeMasterEp()) == 0) continue;
if (dnodeIsMasterEp(pDnode->dnodeEp)) continue;
if (mnodeGetDnodesNum() <= 1) continue;
mLInfo("dnode:%d, set to removing state for it offline:%d seconds", pDnode->dnodeId,
...@@ -571,8 +571,8 @@ static void balanceCheckDnodeAccess() {
if (pDnode->status != TAOS_DN_STATUS_DROPPING && pDnode->status != TAOS_DN_STATUS_OFFLINE) {
pDnode->status = TAOS_DN_STATUS_OFFLINE;
pDnode->offlineReason = TAOS_DN_OFF_STATUS_MSG_TIMEOUT;
mInfo("dnode:%d, set to offline state, access seq:%d, last seq:%d", pDnode->dnodeId, tsAccessSquence,
pDnode->lastAccess);
mInfo("dnode:%d, set to offline state, access seq:%d last seq:%d laststat:%d", pDnode->dnodeId, tsAccessSquence,
pDnode->lastAccess, pDnode->status);
balanceSetVgroupOffline(pDnode);
}
}
......
...@@ -62,11 +62,11 @@ typedef struct SLocalReducer {
bool hasUnprocessedRow;
tOrderDescriptor * pDesc;
SColumnModel * resColModel;
SColumnModel* finalModel;
tExtMemBuffer ** pExtMemBuffer; // disk-based buffer
SFillInfo* pFillInfo; // interpolation support structure
char* pFinalRes; // result data after interpo
tFilePage* discardData;
SResultInfo * pResInfo;
bool discard;
int32_t offset; // limit offset value
bool orderPrjOnSTable; // projection query on stable
...@@ -75,7 +75,8 @@ typedef struct SLocalReducer {
typedef struct SRetrieveSupport {
tExtMemBuffer ** pExtMemBuffer; // for build loser tree
tOrderDescriptor *pOrderDescriptor;
SColumnModel* pFinalColModel; // colModel for final result
SColumnModel* pFFColModel;
int32_t subqueryIndex; // index of current vnode in vnode list
SSqlObj * pParentSql;
tFilePage * localBuffer; // temp buffer, there is a buffer for each vnode to
...@@ -97,7 +98,7 @@ int32_t tscFlushTmpBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tF
* create local reducer to launch the second-stage reduce process at client site
*/
void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
SColumnModel *finalModel, SSqlObj* pSql);
SColumnModel *finalModel, SColumnModel *pFFModel, SSqlObj* pSql);
void tscDestroyLocalReducer(SSqlObj *pSql);
......
...@@ -23,7 +23,7 @@ extern "C" {
#include "tscUtil.h"
#include "tsclient.h"
void tscFetchDatablockFromSubquery(SSqlObj* pSql);
void tscFetchDatablockForSubquery(SSqlObj* pSql);
void tscSetupOutputColumnIndex(SSqlObj* pSql);
void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code);
...@@ -41,6 +41,8 @@ int32_t tscHandleInsertRetry(SSqlObj* pSql);
void tscBuildResFromSubqueries(SSqlObj *pSql);
TAOS_ROW doSetResultRowData(SSqlObj *pSql, bool finalResult);
char *getArithemicInputSrc(void *param, const char *name, int32_t colId);
#ifdef __cplusplus
}
#endif
......
...@@ -75,6 +75,7 @@ typedef struct SJoinSupporter {
SArray* exprList;
SFieldInfo fieldsInfo;
STagCond tagCond;
SSqlGroupbyExpr groupInfo; // group by info
struct STSBuf* pTSBuf; // the TSBuf struct that holds the compressed timestamp array
FILE* f; // temporary file in order to create TSBuf
char path[PATH_MAX]; // temporary file path, todo dynamic allocate memory
...@@ -86,7 +87,7 @@ typedef struct SJoinSupporter {
} SJoinSupporter;
typedef struct SVgroupTableInfo {
SCMVgroupInfo vgInfo;
SVgroupInfo vgInfo;
SArray* itemList; //SArray<STableIdInfo>
} SVgroupTableInfo;
...@@ -124,6 +125,7 @@ int32_t tscGetDataBlockFromList(void* pHashList, SArray* pDataBlockList, int64_t
*/
bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo);
bool tscIsTWAQuery(SQueryInfo* pQueryInfo);
bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo);
bool tscNonOrderedProjectionQueryOnSTable(SQueryInfo *pQueryInfo, int32_t tableIndex);
bool tscOrderedProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex);
...@@ -157,7 +159,7 @@ SInternalField* tscFieldInfoGetInternalField(SFieldInfo* pFieldInfo, int32_t ind
TAOS_FIELD* tscFieldInfoGetField(SFieldInfo* pFieldInfo, int32_t index);
void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo);
void tscFieldInfoUpdateOffsetForInterResult(SQueryInfo* pQueryInfo);
void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo);
int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index);
void tscFieldInfoClear(SFieldInfo* pFieldInfo);
...@@ -166,15 +168,15 @@ static FORCE_INLINE int32_t tscNumOfFields(SQueryInfo* pQueryInfo) { return pQue
int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2);
void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes, int16_t tableIndex);
void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes);
int32_t tscGetResRowLength(SArray* pExprList);
SSqlExpr* tscSqlExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
int16_t size, int16_t interSize, bool isTagCol);
int16_t size, int16_t resColId, int16_t interSize, bool isTagCol);
SSqlExpr* tscSqlExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
int16_t size, int16_t interSize, bool isTagCol);
int16_t size, int16_t resColId, int16_t interSize, bool isTagCol);
SSqlExpr* tscSqlExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, int16_t srcColumnIndex, int16_t type,
int16_t size);
...@@ -225,8 +227,9 @@ void tscInitQueryInfo(SQueryInfo* pQueryInfo);
void tscClearSubqueryInfo(SSqlCmd* pCmd);
void tscFreeVgroupTableInfo(SArray* pVgroupTables);
SArray* tscCloneVgroupTableInfo(SArray* pVgroupTables);
SArray* tscVgroupTableInfoClone(SArray* pVgroupTables);
void tscRemoveVgroupTableGroup(SArray* pVgroupTable, int32_t index);
void tscVgroupTableCopy(SVgroupTableInfo* info, SVgroupTableInfo* pInfo);
int tscGetSTableVgroupInfo(SSqlObj* pSql, int32_t clauseIndex);
int tscGetTableMeta(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo);
...@@ -237,7 +240,7 @@ void tscDoQuery(SSqlObj* pSql);
SVgroupsInfo* tscVgroupInfoClone(SVgroupsInfo *pInfo);
void* tscVgroupInfoClear(SVgroupsInfo *pInfo);
void tscSCMVgroupInfoCopy(SCMVgroupInfo* dst, const SCMVgroupInfo* src);
void tscSVgroupInfoCopy(SVgroupInfo* dst, const SVgroupInfo* src);
/**
* The create object function must be successful expect for the out of memory issue.
*
...@@ -265,6 +268,7 @@ void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t sub
void doAddGroupColumnForSubquery(SQueryInfo* pQueryInfo, int32_t tagIndex);
int16_t tscGetJoinTagColIdByUid(STagCond* pTagCond, uint64_t uid);
int16_t tscGetTagColIndexById(STableMeta* pTableMeta, int16_t colId);
void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex);
......
...@@ -77,7 +77,7 @@ SSchema *tscGetTableColumnSchema(const STableMeta *pMeta, int32_t colIndex);
* @param colId
* @return
*/
SSchema* tscGetTableColumnSchemaById(STableMeta* pTableMeta, int16_t colId);
SSchema* tscGetColumnSchemaById(STableMeta* pTableMeta, int16_t colId);
/**
* check if the schema is valid or not, including following aspects:
...@@ -107,9 +107,6 @@ SSchema tscGetTbnameColumnSchema();
*/
STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size);
//todo tags value as well as the table id structure needs refactor
char *tsGetTagsValue(STableMeta *pMeta);
#ifdef __cplusplus
}
#endif
......
...@@ -30,6 +30,7 @@ extern "C" {
#include "tsqlfunction.h"
#include "tutil.h"
#include "tcache.h"
#include "tref.h"
#include "qExecutor.h" #include "qExecutor.h"
#include "qSqlparser.h" #include "qSqlparser.h"
...@@ -89,12 +90,12 @@ typedef struct STableComInfo { ...@@ -89,12 +90,12 @@ typedef struct STableComInfo {
int32_t rowSize; int32_t rowSize;
} STableComInfo; } STableComInfo;
typedef struct SCMCorVgroupInfo { typedef struct SCorVgroupInfo {
int32_t version; int32_t version;
int8_t inUse; int8_t inUse;
int8_t numOfEps; int8_t numOfEps;
SEpAddr1 epAddr[TSDB_MAX_REPLICA]; SEpAddr1 epAddr[TSDB_MAX_REPLICA];
} SCMCorVgroupInfo; } SCorVgroupInfo;
typedef struct STableMeta { typedef struct STableMeta {
STableComInfo tableInfo; STableComInfo tableInfo;
...@@ -102,8 +103,8 @@ typedef struct STableMeta { ...@@ -102,8 +103,8 @@ typedef struct STableMeta {
int16_t sversion; int16_t sversion;
int16_t tversion; int16_t tversion;
char sTableId[TSDB_TABLE_FNAME_LEN]; char sTableId[TSDB_TABLE_FNAME_LEN];
SCMVgroupInfo vgroupInfo; SVgroupInfo vgroupInfo;
SCMCorVgroupInfo corVgroupInfo; SCorVgroupInfo corVgroupInfo;
STableId id; STableId id;
SSchema schema[]; // if the table is TSDB_CHILD_TABLE, schema is acquired by super table meta info SSchema schema[]; // if the table is TSDB_CHILD_TABLE, schema is acquired by super table meta info
} STableMeta; } STableMeta;
...@@ -127,7 +128,7 @@ typedef struct STableMetaInfo { ...@@ -127,7 +128,7 @@ typedef struct STableMetaInfo {
typedef struct SSqlExpr { typedef struct SSqlExpr {
char aliasName[TSDB_COL_NAME_LEN]; // as aliasName char aliasName[TSDB_COL_NAME_LEN]; // as aliasName
SColIndex colInfo; SColIndex colInfo;
int64_t uid; // refactor use the pointer uint64_t uid; // refactor use the pointer
int16_t functionId; // function id in aAgg array int16_t functionId; // function id in aAgg array
int16_t resType; // return value type int16_t resType; // return value type
int16_t resBytes; // length of return value int16_t resBytes; // length of return value
...@@ -135,6 +136,7 @@ typedef struct SSqlExpr { ...@@ -135,6 +136,7 @@ typedef struct SSqlExpr {
int16_t numOfParams; // argument value of each function int16_t numOfParams; // argument value of each function
tVariant param[3]; // parameters are not more than 3 tVariant param[3]; // parameters are not more than 3
int32_t offset; // sub result column value of arithmetic expression. int32_t offset; // sub result column value of arithmetic expression.
int16_t resColId; // result column id
} SSqlExpr;
typedef struct SColumnIndex {
...@@ -250,6 +252,7 @@ typedef struct SQueryInfo {
int64_t clauseLimit; // limit for current sub clause
int64_t prjOffset; // offset value in the original sql expression, only applied at client side
int32_t udColumnId; // current user-defined constant output field column id, monotonically decreases from TSDB_UD_COLUMN_INDEX
int16_t resColumnId; // result column id
} SQueryInfo;
typedef struct {
...@@ -329,6 +332,7 @@ typedef struct STscObj {
char writeAuth : 1;
char superAuth : 1;
uint32_t connId;
uint64_t rid; // ref ID returned by taosAddRef
struct SSqlObj * pHb; struct SSqlObj * pHb;
struct SSqlObj * sqlList; struct SSqlObj * sqlList;
struct SSqlStream *streamList; struct SSqlStream *streamList;
...@@ -347,7 +351,7 @@ typedef struct SSqlObj { ...@@ -347,7 +351,7 @@ typedef struct SSqlObj {
void *signature; void *signature;
pthread_t owner; // owner of sql object, by which it is executed pthread_t owner; // owner of sql object, by which it is executed
STscObj *pTscObj; STscObj *pTscObj;
void *pRpcCtx; int64_t rpcRid;
void (*fp)(); void (*fp)();
void (*fetchFp)(); void (*fetchFp)();
void *param; void *param;
...@@ -430,14 +434,6 @@ void tscResetSqlCmdObj(SSqlCmd *pCmd, bool removeFromCache); ...@@ -430,14 +434,6 @@ void tscResetSqlCmdObj(SSqlCmd *pCmd, bool removeFromCache);
*/ */
void tscFreeSqlResult(SSqlObj *pSql); void tscFreeSqlResult(SSqlObj *pSql);
/**
* only free part of resources allocated during query.
* TODO remove it later
* Note: this function is multi-thread safe.
* @param pObj
*/
void tscPartiallyFreeSqlObj(SSqlObj *pSql);
/** /**
* free sql object, release allocated resource * free sql object, release allocated resource
* @param pObj * @param pObj
...@@ -446,7 +442,7 @@ void tscFreeSqlObj(SSqlObj *pSql); ...@@ -446,7 +442,7 @@ void tscFreeSqlObj(SSqlObj *pSql);
void tscFreeRegisteredSqlObj(void *pSql); void tscFreeRegisteredSqlObj(void *pSql);
void tscFreeTableMetaHelper(void *pTableMeta); void tscFreeTableMetaHelper(void *pTableMeta);
void tscCloseTscObj(STscObj *pObj); void tscCloseTscObj(void *pObj);
// todo move to taos? or create a new file: taos_internal.h // todo move to taos? or create a new file: taos_internal.h
TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int), TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int),
...@@ -468,17 +464,16 @@ int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* s ...@@ -468,17 +464,16 @@ int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* s
int32_t tscToSQLCmd(SSqlObj *pSql, struct SSqlInfo *pInfo); int32_t tscToSQLCmd(SSqlObj *pSql, struct SSqlInfo *pInfo);
static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex) { static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex, int32_t offset) {
SInternalField* pInfo = (SInternalField*) TARRAY_GET_ELEM(pFieldInfo->internalField, columnIndex); SInternalField* pInfo = (SInternalField*) TARRAY_GET_ELEM(pFieldInfo->internalField, columnIndex);
assert(pInfo->pSqlExpr != NULL);
int32_t type = pInfo->pSqlExpr->resType; int32_t type = pInfo->field.type;
int32_t bytes = pInfo->pSqlExpr->resBytes; int32_t bytes = pInfo->field.bytes;
char* pData = pRes->data + (int32_t)(pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row); char* pData = pRes->data + (int32_t)(offset * pRes->numOfRows + bytes * pRes->row);
// user defined constant value output columns // user defined constant value output columns
if (TSDB_COL_IS_UD_COL(pInfo->pSqlExpr->colInfo.flag)) { if (pInfo->pSqlExpr != NULL && TSDB_COL_IS_UD_COL(pInfo->pSqlExpr->colInfo.flag)) {
if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) { if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) {
pData = pInfo->pSqlExpr->param[1].pz; pData = pInfo->pSqlExpr->param[1].pz;
pRes->length[columnIndex] = pInfo->pSqlExpr->param[1].nLen; pRes->length[columnIndex] = pInfo->pSqlExpr->param[1].nLen;
...@@ -516,13 +511,14 @@ extern void * tscQhandle; ...@@ -516,13 +511,14 @@ extern void * tscQhandle;
extern int tscKeepConn[]; extern int tscKeepConn[];
extern int tsInsertHeadSize; extern int tsInsertHeadSize;
extern int tscNumOfThreads; extern int tscNumOfThreads;
extern int tscRefId;
extern SRpcCorEpSet tscMgmtEpSet; extern SRpcCorEpSet tscMgmtEpSet;
extern int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo); extern int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo);
int32_t tscCompareTidTags(const void* p1, const void* p2);
void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArray* tables); void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArray* tables);
int16_t getNewResColId(SQueryInfo* pQueryInfo);
#ifdef __cplusplus #ifdef __cplusplus
} }
......
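The tsclient.h changes above replace raw pointers with integer reference IDs: `SSqlObj.pRpcCtx` becomes `int64_t rpcRid`, and `STscObj` gains a `rid` obtained from `taosAddRef`. A minimal sketch of that pattern follows; the registry, names, and fixed-size table are invented for illustration and are not TDengine's actual ref implementation. Callers keep only the ID, and every later access goes through a lookup that fails safely once the object has been removed.

```c
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

/* Hypothetical reference-ID table: callers hold an int64 ID instead of a raw
 * pointer, so a late callback cannot dereference a freed object. */
#define REF_SLOTS 64

typedef struct {
  int64_t id;    /* 0 means the slot is empty */
  void   *ptr;   /* the registered object */
} RefSlot;

static RefSlot         refTable[REF_SLOTS];
static int64_t         nextId = 1;
static pthread_mutex_t refLock = PTHREAD_MUTEX_INITIALIZER;

int64_t refAdd(void *ptr) {            /* analogous role to taosAddRef */
  pthread_mutex_lock(&refLock);
  for (int i = 0; i < REF_SLOTS; ++i) {
    if (refTable[i].id == 0) {
      refTable[i].id  = nextId++;
      refTable[i].ptr = ptr;
      pthread_mutex_unlock(&refLock);
      return refTable[i].id;
    }
  }
  pthread_mutex_unlock(&refLock);
  return -1;                           /* table full */
}

void *refAcquire(int64_t id) {         /* NULL if the object is already gone */
  void *p = NULL;
  pthread_mutex_lock(&refLock);
  for (int i = 0; i < REF_SLOTS; ++i) {
    if (refTable[i].id == id) { p = refTable[i].ptr; break; }
  }
  pthread_mutex_unlock(&refLock);
  return p;
}

void refRemove(int64_t id) {           /* analogous role to taosRemoveRef */
  pthread_mutex_lock(&refLock);
  for (int i = 0; i < REF_SLOTS; ++i) {
    if (refTable[i].id == id) { refTable[i].id = 0; refTable[i].ptr = NULL; }
  }
  pthread_mutex_unlock(&refLock);
}
```

With this shape, a late cancellation such as `rpcCancelRequest(pSql->rpcRid)` can treat a stale ID as a no-op instead of chasing a dangling pointer, which matches the `rpcRid > 0` guards added later in this commit.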
...@@ -176,7 +176,7 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo ...@@ -176,7 +176,7 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo
} }
if (pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE) { if (pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE) {
tscFetchDatablockFromSubquery(pSql); tscFetchDatablockForSubquery(pSql);
} else { } else {
tscProcessSql(pSql); tscProcessSql(pSql);
} }
...@@ -226,7 +226,7 @@ void taos_fetch_rows_a(TAOS_RES *taosa, __async_cb_func_t fp, void *param) { ...@@ -226,7 +226,7 @@ void taos_fetch_rows_a(TAOS_RES *taosa, __async_cb_func_t fp, void *param) {
// handle the sub queries of join query // handle the sub queries of join query
if (pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE) { if (pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE) {
tscFetchDatablockFromSubquery(pSql); tscFetchDatablockForSubquery(pSql);
} else if (pRes->completed) { } else if (pRes->completed) {
if(pCmd->command == TSDB_SQL_FETCH || (pCmd->command >= TSDB_SQL_SERV_STATUS && pCmd->command <= TSDB_SQL_CURRENT_USER)) { if(pCmd->command == TSDB_SQL_FETCH || (pCmd->command >= TSDB_SQL_SERV_STATUS && pCmd->command <= TSDB_SQL_CURRENT_USER)) {
if (hasMoreVnodesToTry(pSql)) { // sequentially retrieve data from remain vnodes. if (hasMoreVnodesToTry(pSql)) { // sequentially retrieve data from remain vnodes.
...@@ -351,7 +351,7 @@ void tscProcessFetchRow(SSchedMsg *pMsg) { ...@@ -351,7 +351,7 @@ void tscProcessFetchRow(SSchedMsg *pMsg) {
SInternalField* pSup = taosArrayGet(pQueryInfo->fieldsInfo.internalField, i); SInternalField* pSup = taosArrayGet(pQueryInfo->fieldsInfo.internalField, i);
if (pSup->pSqlExpr != NULL) { if (pSup->pSqlExpr != NULL) {
tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i); tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i, 0);
} else { } else {
// todo add // todo add
} }
...@@ -405,7 +405,8 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { ...@@ -405,7 +405,8 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
SSqlRes *pRes = &pSql->res; SSqlRes *pRes = &pSql->res;
pRes->code = code; pRes->code = code;
const char* msg = (pCmd->command == TSDB_SQL_STABLEVGROUP)? "vgroup-list":"table-meta"; SSqlObj *sub = (SSqlObj*) res;
const char* msg = (sub->cmd.command == TSDB_SQL_STABLEVGROUP)? "vgroup-list":"table-meta";
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
tscError("%p get %s failed, code:%s", pSql, msg, tstrerror(code)); tscError("%p get %s failed, code:%s", pSql, msg, tstrerror(code));
goto _error; goto _error;
...@@ -428,7 +429,11 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { ...@@ -428,7 +429,11 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
assert(code == TSDB_CODE_SUCCESS); assert(code == TSDB_CODE_SUCCESS);
} }
assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0) && pSql->param != NULL); // param already freed by other routine and pSql in tscCache when ctrl + c
if (atomic_load_ptr(&pSql->param) == NULL) {
return;
}
assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0));
SRetrieveSupport *trs = (SRetrieveSupport *)pSql->param; SRetrieveSupport *trs = (SRetrieveSupport *)pSql->param;
SSqlObj * pParObj = trs->pParentSql; SSqlObj * pParObj = trs->pParentSql;
...@@ -437,6 +442,20 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { ...@@ -437,6 +442,20 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
assert(pParObj->signature == pParObj && trs->subqueryIndex == pTableMetaInfo->vgroupIndex && assert(pParObj->signature == pParObj && trs->subqueryIndex == pTableMetaInfo->vgroupIndex &&
pTableMetaInfo->vgroupIndex >= 0 && pTableMetaInfo->vgroupList != NULL); pTableMetaInfo->vgroupIndex >= 0 && pTableMetaInfo->vgroupList != NULL);
// tscProcessSql can add error into async res
tscProcessSql(pSql);
return;
} else if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY)) {
tscDebug("%p update table meta in local cache, continue to process sql and send corresponding tid_tag query", pSql);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
code = tscGetTableMeta(pSql, pTableMetaInfo);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
return;
} else {
assert(code == TSDB_CODE_SUCCESS);
}
assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0));
// tscProcessSql can add error into async res // tscProcessSql can add error into async res
tscProcessSql(pSql); tscProcessSql(pSql);
return; return;
...@@ -461,7 +480,6 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { ...@@ -461,7 +480,6 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
tscResetSqlCmdObj(pCmd, false); tscResetSqlCmdObj(pCmd, false);
code = tsParseSql(pSql, true); code = tsParseSql(pSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
return; return;
} else if (code != TSDB_CODE_SUCCESS) { } else if (code != TSDB_CODE_SUCCESS) {
......
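The `tscTableMetaCallBack` hunk above adds an `atomic_load_ptr(&pSql->param)` check because another routine may already have freed and cleared `param` when the user interrupts with ctrl+c. A small illustration of that guard, written with standard C11 atomics rather than TDengine's own atomic macros:

```c
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative only: a callback that bails out when the owner has already
 * cleared the shared parameter from another thread. */
typedef struct {
  _Atomic(void *) param;   /* set by the issuer, cleared on cancellation */
} Request;

static void onTableMetaReady(Request *req) {
  void *param = atomic_load(&req->param);
  if (param == NULL) {
    /* param already released by the cancelling thread; nothing to do */
    return;
  }
  /* continue using 'param' for the rest of this callback */
  printf("continue processing with %p\n", param);
}
```

As in the original, the check only narrows the race window; it assumes the enclosing object stays alive for the duration of the callback.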
This diff has been collapsed.
...@@ -49,82 +49,6 @@ typedef struct SCreateBuilder { ...@@ -49,82 +49,6 @@ typedef struct SCreateBuilder {
} SCreateBuilder; } SCreateBuilder;
static void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnName, int16_t type, size_t valueLength); static void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnName, int16_t type, size_t valueLength);
static int32_t getToStringLength(const char *pData, int32_t length, int32_t type) {
char buf[512] = {0};
int32_t len = 0;
int32_t MAX_BOOL_TYPE_LENGTH = 5; // max(strlen("true"), strlen("false"));
switch (type) {
case TSDB_DATA_TYPE_BINARY:
return length;
case TSDB_DATA_TYPE_NCHAR:
return length;
case TSDB_DATA_TYPE_DOUBLE: {
double dv = 0;
dv = GET_DOUBLE_VAL(pData);
len = sprintf(buf, "%lf", dv);
if (strncasecmp("nan", buf, 3) == 0) {
len = 4;
}
} break;
case TSDB_DATA_TYPE_FLOAT: {
float fv = 0;
fv = GET_FLOAT_VAL(pData);
len = sprintf(buf, "%f", fv);
if (strncasecmp("nan", buf, 3) == 0) {
len = 4;
}
} break;
case TSDB_DATA_TYPE_TIMESTAMP:
case TSDB_DATA_TYPE_BIGINT:
len = sprintf(buf, "%" PRId64, *(int64_t *)pData);
break;
case TSDB_DATA_TYPE_BOOL:
len = MAX_BOOL_TYPE_LENGTH;
break;
default:
len = sprintf(buf, "%d", *(int32_t *)pData);
break;
};
return len;
}
/*
* we need to convert all data into string, so we need to sprintf all kinds of
* non-string data into string, and record its length to get the right
* maximum length. The length may be less or greater than its original binary length:
* For example:
* length((short) 1) == 1, less than sizeof(short)
* length((uint64_t) 123456789011) > 12, greater than sizsof(uint64_t)
*/
static int32_t tscMaxLengthOfTagsFields(SSqlObj *pSql) {
STableMeta *pMeta = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0)->pTableMeta;
if (pMeta->tableType == TSDB_SUPER_TABLE || pMeta->tableType == TSDB_NORMAL_TABLE ||
pMeta->tableType == TSDB_STREAM_TABLE) {
return 0;
}
char * pTagValue = tsGetTagsValue(pMeta);
SSchema *pTagsSchema = tscGetTableTagSchema(pMeta);
int32_t len = getToStringLength(pTagValue, pTagsSchema[0].bytes, pTagsSchema[0].type);
pTagValue += pTagsSchema[0].bytes;
int32_t numOfTags = tscGetNumOfTags(pMeta);
for (int32_t i = 1; i < numOfTags; ++i) {
int32_t tLen = getToStringLength(pTagValue, pTagsSchema[i].bytes, pTagsSchema[i].type);
if (len < tLen) {
len = tLen;
}
pTagValue += pTagsSchema[i].bytes;
}
return len;
}
static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) { static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
SSqlRes *pRes = &pSql->res; SSqlRes *pRes = &pSql->res;
...@@ -186,8 +110,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) { ...@@ -186,8 +110,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
return 0; return 0;
} }
// the following is handle display tags value for meters created according to metric // the following is handle display tags for table created according to super table
char *pTagValue = tsGetTagsValue(pMeta);
for (int32_t i = numOfRows; i < totalNumOfRows; ++i) { for (int32_t i = numOfRows; i < totalNumOfRows; ++i) {
// field name // field name
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 0); TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 0);
...@@ -219,8 +142,6 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) { ...@@ -219,8 +142,6 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
char *target = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 3) * totalNumOfRows + pField->bytes * i; char *target = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 3) * totalNumOfRows + pField->bytes * i;
const char *src = "TAG"; const char *src = "TAG";
STR_WITH_MAXSIZE_TO_VARSTR(target, src, pField->bytes); STR_WITH_MAXSIZE_TO_VARSTR(target, src, pField->bytes);
pTagValue += pSchema[i].bytes;
} }
return 0; return 0;
...@@ -241,7 +162,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, ...@@ -241,7 +162,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
SInternalField* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); SInternalField* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY,
(TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE, (TSDB_COL_NAME_LEN - 1), false); (TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE, -1000, (TSDB_COL_NAME_LEN - 1), false);
rowLen += ((TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE); rowLen += ((TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE);
...@@ -251,7 +172,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, ...@@ -251,7 +172,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(typeColLength + VARSTR_HEADER_SIZE), pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(typeColLength + VARSTR_HEADER_SIZE),
typeColLength, false); -1000, typeColLength, false);
rowLen += typeColLength + VARSTR_HEADER_SIZE; rowLen += typeColLength + VARSTR_HEADER_SIZE;
...@@ -261,7 +182,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, ...@@ -261,7 +182,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_INT, sizeof(int32_t), pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_INT, sizeof(int32_t),
sizeof(int32_t), false); -1000, sizeof(int32_t), false);
rowLen += sizeof(int32_t); rowLen += sizeof(int32_t);
...@@ -271,7 +192,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, ...@@ -271,7 +192,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(noteColLength + VARSTR_HEADER_SIZE), pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(noteColLength + VARSTR_HEADER_SIZE),
noteColLength, false); -1000, noteColLength, false);
rowLen += noteColLength + VARSTR_HEADER_SIZE; rowLen += noteColLength + VARSTR_HEADER_SIZE;
return rowLen; return rowLen;
...@@ -286,10 +207,10 @@ static int32_t tscProcessDescribeTable(SSqlObj *pSql) { ...@@ -286,10 +207,10 @@ static int32_t tscProcessDescribeTable(SSqlObj *pSql) {
const int32_t TYPE_COLUMN_LENGTH = 16; const int32_t TYPE_COLUMN_LENGTH = 16;
const int32_t NOTE_COLUMN_MIN_LENGTH = 8; const int32_t NOTE_COLUMN_MIN_LENGTH = 8;
int32_t noteFieldLen = tscMaxLengthOfTagsFields(pSql); int32_t noteFieldLen = NOTE_COLUMN_MIN_LENGTH;//tscMaxLengthOfTagsFields(pSql);
if (noteFieldLen == 0) { // if (noteFieldLen == 0) {
noteFieldLen = NOTE_COLUMN_MIN_LENGTH; // noteFieldLen = NOTE_COLUMN_MIN_LENGTH;
} // }
int32_t rowLen = tscBuildTableSchemaResultFields(pSql, NUM_OF_DESC_TABLE_COLUMNS, TYPE_COLUMN_LENGTH, noteFieldLen); int32_t rowLen = tscBuildTableSchemaResultFields(pSql, NUM_OF_DESC_TABLE_COLUMNS, TYPE_COLUMN_LENGTH, noteFieldLen);
tscFieldInfoUpdateOffset(pQueryInfo); tscFieldInfoUpdateOffset(pQueryInfo);
...@@ -486,8 +407,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const ...@@ -486,8 +407,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const
} }
SInternalField* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); SInternalField* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, f.bytes, -1000, f.bytes - VARSTR_HEADER_SIZE, false);
f.bytes, f.bytes - VARSTR_HEADER_SIZE, false);
rowLen += f.bytes; rowLen += f.bytes;
...@@ -501,7 +421,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const ...@@ -501,7 +421,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY,
(int16_t)(ddlLen + VARSTR_HEADER_SIZE), ddlLen, false); (int16_t)(ddlLen + VARSTR_HEADER_SIZE), -1000, ddlLen, false);
rowLen += ddlLen + VARSTR_HEADER_SIZE; rowLen += ddlLen + VARSTR_HEADER_SIZE;
...@@ -698,7 +618,11 @@ static int32_t tscRebuildDDLForNormalTable(SSqlObj *pSql, const char *tableName, ...@@ -698,7 +618,11 @@ static int32_t tscRebuildDDLForNormalTable(SSqlObj *pSql, const char *tableName,
for (int32_t i = 0; i < numOfRows; ++i) { for (int32_t i = 0; i < numOfRows; ++i) {
uint8_t type = pSchema[i].type; uint8_t type = pSchema[i].type;
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName,pSchema->bytes); int32_t bytes = pSchema[i].bytes - VARSTR_HEADER_SIZE;
if (type == TSDB_DATA_TYPE_NCHAR) {
bytes = bytes/TSDB_NCHAR_SIZE;
}
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name, tDataTypeDesc[pSchema[i].type].aName, bytes);
} else { } else {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[pSchema[i].type].aName); snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[pSchema[i].type].aName);
} }
...@@ -721,7 +645,11 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName, ...@@ -721,7 +645,11 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName,
for (int32_t i = 0; i < numOfRows; ++i) { for (int32_t i = 0; i < numOfRows; ++i) {
uint8_t type = pSchema[i].type; uint8_t type = pSchema[i].type;
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result),"%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName,pSchema->bytes); int32_t bytes = pSchema[i].bytes - VARSTR_HEADER_SIZE;
if (type == TSDB_DATA_TYPE_NCHAR) {
bytes = bytes/TSDB_NCHAR_SIZE;
}
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result),"%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName, bytes);
} else { } else {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[type].aName); snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[type].aName);
} }
...@@ -731,7 +659,11 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName, ...@@ -731,7 +659,11 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName,
for (int32_t i = numOfRows; i < totalRows; i++) { for (int32_t i = numOfRows; i < totalRows; i++) {
uint8_t type = pSchema[i].type; uint8_t type = pSchema[i].type;
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName,pSchema->bytes); int32_t bytes = pSchema[i].bytes - VARSTR_HEADER_SIZE;
if (type == TSDB_DATA_TYPE_NCHAR) {
bytes = bytes/TSDB_NCHAR_SIZE;
}
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName, bytes);
} else { } else {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[type].aName); snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[type].aName);
} }
......
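The `tscRebuildDDLForNormalTable`/`tscRebuildDDLForSuperTable` hunks above stop printing the stored `bytes` directly and instead recover the declared column length: subtract the var-string header, and for NCHAR divide by the per-character width. A sketch of that conversion, using assumed example values for the two constants:

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed example values; the real constants come from TDengine headers. */
#define VARSTR_HEADER_SIZE 2
#define TSDB_NCHAR_SIZE    4

enum { TYPE_BINARY = 8, TYPE_NCHAR = 10 };   /* placeholder type codes */

/* Convert the stored byte width of a variable-length column back to the
 * length the user declared in CREATE TABLE. */
static int32_t declaredLength(int8_t type, int32_t storedBytes) {
  int32_t bytes = storedBytes - VARSTR_HEADER_SIZE;
  if (type == TYPE_NCHAR) {
    bytes /= TSDB_NCHAR_SIZE;   /* NCHAR stores several bytes per character */
  }
  return bytes;
}

int main(void) {
  /* e.g. an NCHAR(16) column is stored as 16*4 + 2 = 66 bytes */
  printf("nchar:  %d\n", declaredLength(TYPE_NCHAR, 66));   /* -> 16 */
  printf("binary: %d\n", declaredLength(TYPE_BINARY, 34));  /* -> 32 */
  return 0;
}
```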
...@@ -13,14 +13,15 @@ ...@@ -13,14 +13,15 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#include "tscLocalMerge.h"
#include "tscSubquery.h"
#include "os.h" #include "os.h"
#include "qAst.h"
#include "tlosertree.h" #include "tlosertree.h"
#include "tscLog.h"
#include "tscUtil.h" #include "tscUtil.h"
#include "tschemautil.h" #include "tschemautil.h"
#include "tsclient.h" #include "tsclient.h"
#include "tutil.h"
#include "tscLog.h"
#include "tscLocalMerge.h"
typedef struct SCompareParam { typedef struct SCompareParam {
SLocalDataSource **pLocalData; SLocalDataSource **pLocalData;
...@@ -29,6 +30,8 @@ typedef struct SCompareParam { ...@@ -29,6 +30,8 @@ typedef struct SCompareParam {
int32_t groupOrderType; int32_t groupOrderType;
} SCompareParam; } SCompareParam;
static void doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize);
int32_t treeComparator(const void *pLeft, const void *pRight, void *param) { int32_t treeComparator(const void *pLeft, const void *pRight, void *param) {
int32_t pLeftIdx = *(int32_t *)pLeft; int32_t pLeftIdx = *(int32_t *)pLeft;
int32_t pRightIdx = *(int32_t *)pRight; int32_t pRightIdx = *(int32_t *)pRight;
...@@ -97,14 +100,14 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalReducer *pReducer, tOrderDesc ...@@ -97,14 +100,14 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalReducer *pReducer, tOrderDesc
pCtx->param[2].i64Key = pQueryInfo->order.order; pCtx->param[2].i64Key = pQueryInfo->order.order;
pCtx->param[2].nType = TSDB_DATA_TYPE_BIGINT; pCtx->param[2].nType = TSDB_DATA_TYPE_BIGINT;
pCtx->param[1].i64Key = pQueryInfo->order.orderColId; pCtx->param[1].i64Key = pQueryInfo->order.orderColId;
} else if (functionId == TSDB_FUNC_APERCT) {
pCtx->param[0].i64Key = pExpr->param[0].i64Key;
pCtx->param[0].nType = pExpr->param[0].nType;
} }
SResultInfo *pResInfo = &pReducer->pResInfo[i]; pCtx->interBufBytes = pExpr->interBytes;
pResInfo->bufLen = pExpr->interBytes; pCtx->resultInfo = calloc(1, pCtx->interBufBytes + sizeof(SResultRowCellInfo));
pResInfo->interResultBuf = calloc(1, (size_t) pResInfo->bufLen); pCtx->stableQuery = true;
pCtx->resultInfo = &pReducer->pResInfo[i];
pCtx->resultInfo->superTableQ = true;
} }
int16_t n = 0; int16_t n = 0;
...@@ -132,12 +135,15 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalReducer *pReducer, tOrderDesc ...@@ -132,12 +135,15 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalReducer *pReducer, tOrderDesc
} }
static SFillColInfo* createFillColInfo(SQueryInfo* pQueryInfo) { static SFillColInfo* createFillColInfo(SQueryInfo* pQueryInfo) {
int32_t numOfCols = (int32_t)tscSqlExprNumOfExprs(pQueryInfo); int32_t numOfCols = (int32_t)tscNumOfFields(pQueryInfo);
int32_t offset = 0; int32_t offset = 0;
SFillColInfo* pFillCol = calloc(numOfCols, sizeof(SFillColInfo)); SFillColInfo* pFillCol = calloc(numOfCols, sizeof(SFillColInfo));
for(int32_t i = 0; i < numOfCols; ++i) { for(int32_t i = 0; i < numOfCols; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); SInternalField* pIField = taosArrayGet(pQueryInfo->fieldsInfo.internalField, i);
if (pIField->pArithExprInfo == NULL) {
SSqlExpr* pExpr = pIField->pSqlExpr;
pFillCol[i].col.bytes = pExpr->resBytes; pFillCol[i].col.bytes = pExpr->resBytes;
pFillCol[i].col.type = (int8_t)pExpr->resType; pFillCol[i].col.type = (int8_t)pExpr->resType;
...@@ -146,14 +152,24 @@ static SFillColInfo* createFillColInfo(SQueryInfo* pQueryInfo) { ...@@ -146,14 +152,24 @@ static SFillColInfo* createFillColInfo(SQueryInfo* pQueryInfo) {
pFillCol[i].col.offset = offset; pFillCol[i].col.offset = offset;
pFillCol[i].functionId = pExpr->functionId; pFillCol[i].functionId = pExpr->functionId;
pFillCol[i].fillVal.i = pQueryInfo->fillVal[i]; pFillCol[i].fillVal.i = pQueryInfo->fillVal[i];
offset += pExpr->resBytes; } else {
pFillCol[i].col.bytes = pIField->field.bytes;
pFillCol[i].col.type = (int8_t)pIField->field.type;
pFillCol[i].col.colId = -100;
pFillCol[i].flag = TSDB_COL_NORMAL;
pFillCol[i].col.offset = offset;
pFillCol[i].functionId = -1;
pFillCol[i].fillVal.i = pQueryInfo->fillVal[i];
}
offset += pFillCol[i].col.bytes;
} }
return pFillCol; return pFillCol;
} }
void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc, void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
SColumnModel *finalmodel, SSqlObj* pSql) { SColumnModel *finalmodel, SColumnModel *pFFModel, SSqlObj* pSql) {
SSqlCmd* pCmd = &pSql->cmd; SSqlCmd* pCmd = &pSql->cmd;
SSqlRes* pRes = &pSql->res; SSqlRes* pRes = &pSql->res;
...@@ -227,7 +243,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd ...@@ -227,7 +243,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
if (ds == NULL) { if (ds == NULL) {
tscError("%p failed to create merge structure", pSql); tscError("%p failed to create merge structure", pSql);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
taosTFree(pReducer); tfree(pReducer);
return; return;
} }
...@@ -254,7 +270,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd ...@@ -254,7 +270,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
if (ds->filePage.num == 0) { // no data in this flush, the index does not increase if (ds->filePage.num == 0) { // no data in this flush, the index does not increase
tscDebug("%p flush data is empty, ignore %d flush record", pSql, idx); tscDebug("%p flush data is empty, ignore %d flush record", pSql, idx);
taosTFree(ds); tfree(ds);
continue; continue;
} }
...@@ -264,7 +280,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd ...@@ -264,7 +280,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
// no data actually, no need to merge result. // no data actually, no need to merge result.
if (idx == 0) { if (idx == 0) {
taosTFree(pReducer); tfree(pReducer);
return; return;
} }
...@@ -272,7 +288,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd ...@@ -272,7 +288,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
SCompareParam *param = malloc(sizeof(SCompareParam)); SCompareParam *param = malloc(sizeof(SCompareParam));
if (param == NULL) { if (param == NULL) {
taosTFree(pReducer); tfree(pReducer);
return; return;
} }
...@@ -286,8 +302,8 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd ...@@ -286,8 +302,8 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
pRes->code = tLoserTreeCreate(&pReducer->pLoserTree, pReducer->numOfBuffer, param, treeComparator); pRes->code = tLoserTreeCreate(&pReducer->pLoserTree, pReducer->numOfBuffer, param, treeComparator);
if (pReducer->pLoserTree == NULL || pRes->code != 0) { if (pReducer->pLoserTree == NULL || pRes->code != 0) {
taosTFree(param); tfree(param);
taosTFree(pReducer); tfree(pReducer);
return; return;
} }
...@@ -330,22 +346,19 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd ...@@ -330,22 +346,19 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
if (pReducer->pTempBuffer == NULL || pReducer->discardData == NULL || pReducer->pResultBuf == NULL || if (pReducer->pTempBuffer == NULL || pReducer->discardData == NULL || pReducer->pResultBuf == NULL ||
/*pReducer->pBufForInterpo == NULL || */pReducer->pFinalRes == NULL || pReducer->prevRowOfInput == NULL) { /*pReducer->pBufForInterpo == NULL || */pReducer->pFinalRes == NULL || pReducer->prevRowOfInput == NULL) {
taosTFree(pReducer->pTempBuffer); tfree(pReducer->pTempBuffer);
taosTFree(pReducer->discardData); tfree(pReducer->discardData);
taosTFree(pReducer->pResultBuf); tfree(pReducer->pResultBuf);
taosTFree(pReducer->pFinalRes); tfree(pReducer->pFinalRes);
taosTFree(pReducer->prevRowOfInput); tfree(pReducer->prevRowOfInput);
taosTFree(pReducer->pLoserTree); tfree(pReducer->pLoserTree);
taosTFree(param); tfree(param);
taosTFree(pReducer); tfree(pReducer);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
return; return;
} }
size_t numOfCols = tscSqlExprNumOfExprs(pQueryInfo);
pReducer->pTempBuffer->num = 0; pReducer->pTempBuffer->num = 0;
pReducer->pResInfo = calloc(numOfCols, sizeof(SResultInfo));
tscCreateResPointerInfo(pRes, pQueryInfo); tscCreateResPointerInfo(pRes, pQueryInfo);
tscInitSqlContext(pCmd, pReducer, pDesc); tscInitSqlContext(pCmd, pReducer, pDesc);
...@@ -373,8 +386,8 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd ...@@ -373,8 +386,8 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
if (pQueryInfo->fillType != TSDB_FILL_NONE) { if (pQueryInfo->fillType != TSDB_FILL_NONE) {
SFillColInfo* pFillCol = createFillColInfo(pQueryInfo); SFillColInfo* pFillCol = createFillColInfo(pQueryInfo);
pReducer->pFillInfo = taosInitFillInfo(pQueryInfo->order.order, revisedSTime, pQueryInfo->groupbyExpr.numOfGroupCols, pReducer->pFillInfo = taosInitFillInfo(pQueryInfo->order.order, revisedSTime, pQueryInfo->groupbyExpr.numOfGroupCols,
4096, (int32_t)numOfCols, pQueryInfo->interval.sliding, pQueryInfo->interval.slidingUnit, 4096, (int32_t)pQueryInfo->fieldsInfo.numOfOutput, pQueryInfo->interval.sliding, pQueryInfo->interval.slidingUnit,
tinfo.precision, pQueryInfo->fillType, pFillCol); tinfo.precision, pQueryInfo->fillType, pFillCol, pSql);
} }
} }
...@@ -489,47 +502,41 @@ void tscDestroyLocalReducer(SSqlObj *pSql) { ...@@ -489,47 +502,41 @@ void tscDestroyLocalReducer(SSqlObj *pSql) {
tscDebug("%p waiting for delete procedure, status: %d", pSql, status); tscDebug("%p waiting for delete procedure, status: %d", pSql, status);
} }
pLocalReducer->pFillInfo = taosDestoryFillInfo(pLocalReducer->pFillInfo); pLocalReducer->pFillInfo = taosDestroyFillInfo(pLocalReducer->pFillInfo);
if (pLocalReducer->pCtx != NULL) { if (pLocalReducer->pCtx != NULL) {
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { int32_t numOfExprs = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < numOfExprs; ++i) {
SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[i]; SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[i];
tVariantDestroy(&pCtx->tag); tVariantDestroy(&pCtx->tag);
tfree(pCtx->resultInfo);
if (pCtx->tagInfo.pTagCtxList != NULL) { if (pCtx->tagInfo.pTagCtxList != NULL) {
taosTFree(pCtx->tagInfo.pTagCtxList); tfree(pCtx->tagInfo.pTagCtxList);
} }
} }
taosTFree(pLocalReducer->pCtx); tfree(pLocalReducer->pCtx);
} }
taosTFree(pLocalReducer->prevRowOfInput); tfree(pLocalReducer->prevRowOfInput);
taosTFree(pLocalReducer->pTempBuffer); tfree(pLocalReducer->pTempBuffer);
taosTFree(pLocalReducer->pResultBuf); tfree(pLocalReducer->pResultBuf);
if (pLocalReducer->pResInfo != NULL) {
size_t num = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < num; ++i) {
taosTFree(pLocalReducer->pResInfo[i].interResultBuf);
}
taosTFree(pLocalReducer->pResInfo);
}
if (pLocalReducer->pLoserTree) { if (pLocalReducer->pLoserTree) {
taosTFree(pLocalReducer->pLoserTree->param); tfree(pLocalReducer->pLoserTree->param);
taosTFree(pLocalReducer->pLoserTree); tfree(pLocalReducer->pLoserTree);
} }
taosTFree(pLocalReducer->pFinalRes); tfree(pLocalReducer->pFinalRes);
taosTFree(pLocalReducer->discardData); tfree(pLocalReducer->discardData);
tscLocalReducerEnvDestroy(pLocalReducer->pExtMemBuffer, pLocalReducer->pDesc, pLocalReducer->resColModel, tscLocalReducerEnvDestroy(pLocalReducer->pExtMemBuffer, pLocalReducer->pDesc, pLocalReducer->resColModel,
pLocalReducer->numOfVnode); pLocalReducer->numOfVnode);
for (int32_t i = 0; i < pLocalReducer->numOfBuffer; ++i) { for (int32_t i = 0; i < pLocalReducer->numOfBuffer; ++i) {
taosTFree(pLocalReducer->pLocalDataSrc[i]); tfree(pLocalReducer->pLocalDataSrc[i]);
} }
pLocalReducer->numOfBuffer = 0; pLocalReducer->numOfBuffer = 0;
...@@ -563,7 +570,8 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm ...@@ -563,7 +570,8 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm
if (numOfGroupByCols > 0) { if (numOfGroupByCols > 0) {
if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) { if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) {
int32_t startCols = pQueryInfo->fieldsInfo.numOfOutput - pQueryInfo->groupbyExpr.numOfGroupCols; int32_t numOfInternalOutput = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
int32_t startCols = numOfInternalOutput - pQueryInfo->groupbyExpr.numOfGroupCols;
// the last "pQueryInfo->groupbyExpr.numOfGroupCols" columns are order-by columns // the last "pQueryInfo->groupbyExpr.numOfGroupCols" columns are order-by columns
for (int32_t i = 0; i < pQueryInfo->groupbyExpr.numOfGroupCols; ++i) { for (int32_t i = 0; i < pQueryInfo->groupbyExpr.numOfGroupCols; ++i) {
...@@ -596,7 +604,7 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm ...@@ -596,7 +604,7 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm
} }
*pOrderDesc = tOrderDesCreate(orderColIndexList, numOfGroupByCols, pModel, pQueryInfo->order.order); *pOrderDesc = tOrderDesCreate(orderColIndexList, numOfGroupByCols, pModel, pQueryInfo->order.order);
taosTFree(orderColIndexList); tfree(orderColIndexList);
if (*pOrderDesc == NULL) { if (*pOrderDesc == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY; return TSDB_CODE_TSC_OUT_OF_MEMORY;
...@@ -682,6 +690,8 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr ...@@ -682,6 +690,8 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
pSchema[i].bytes = pExpr->resBytes; pSchema[i].bytes = pExpr->resBytes;
pSchema[i].type = (int8_t)pExpr->resType; pSchema[i].type = (int8_t)pExpr->resType;
tstrncpy(pSchema[i].name, pExpr->aliasName, tListLen(pSchema[i].name));
rlen += pExpr->resBytes; rlen += pExpr->resBytes;
} }
...@@ -707,7 +717,7 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr ...@@ -707,7 +717,7 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
if (createOrderDescriptor(pOrderDesc, pCmd, pModel) != TSDB_CODE_SUCCESS) { if (createOrderDescriptor(pOrderDesc, pCmd, pModel) != TSDB_CODE_SUCCESS) {
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
taosTFree(pSchema); tfree(pSchema);
return pRes->code; return pRes->code;
} }
...@@ -744,8 +754,8 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr ...@@ -744,8 +754,8 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
} }
*pFinalModel = createColumnModel(pSchema, (int32_t)size, capacity); *pFinalModel = createColumnModel(pSchema, (int32_t)size, capacity);
taosTFree(pSchema);
tfree(pSchema);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
...@@ -764,7 +774,7 @@ void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDe ...@@ -764,7 +774,7 @@ void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDe
pMemBuffer[i] = destoryExtMemBuffer(pMemBuffer[i]); pMemBuffer[i] = destoryExtMemBuffer(pMemBuffer[i]);
} }
taosTFree(pMemBuffer); tfree(pMemBuffer);
} }
/** /**
...@@ -924,7 +934,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO ...@@ -924,7 +934,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
} }
while (1) { while (1) {
int64_t newRows = taosGenerateDataBlock(pFillInfo, pResPages, pLocalReducer->resColModel->capacity); int64_t newRows = taosFillResultDataBlock(pFillInfo, pResPages, pLocalReducer->resColModel->capacity);
if (pQueryInfo->limit.offset < newRows) { if (pQueryInfo->limit.offset < newRows) {
newRows -= pQueryInfo->limit.offset; newRows -= pQueryInfo->limit.offset;
...@@ -953,7 +963,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO ...@@ -953,7 +963,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
} }
// all output in current group are completed // all output in current group are completed
int32_t totalRemainRows = (int32_t)getFilledNumOfRes(pFillInfo, actualETime, pLocalReducer->resColModel->capacity); int32_t totalRemainRows = (int32_t)getNumOfResWithFill(pFillInfo, actualETime, pLocalReducer->resColModel->capacity);
if (totalRemainRows <= 0) { if (totalRemainRows <= 0) {
break; break;
} }
...@@ -974,10 +984,11 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO ...@@ -974,10 +984,11 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
savePrevRecordAndSetupFillInfo(pLocalReducer, pQueryInfo, pFillInfo); savePrevRecordAndSetupFillInfo(pLocalReducer, pQueryInfo, pFillInfo);
} }
int32_t offset = 0;
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
int16_t offset = getColumnModelOffset(pLocalReducer->resColModel, i);
memcpy(pRes->data + offset * pRes->numOfRows, pResPages[i]->data, (size_t)(pField->bytes * pRes->numOfRows)); memcpy(pRes->data + offset * pRes->numOfRows, pResPages[i]->data, (size_t)(pField->bytes * pRes->numOfRows));
offset += pField->bytes;
} }
pRes->numOfRowsGroup += pRes->numOfRows; pRes->numOfRowsGroup += pRes->numOfRows;
...@@ -986,10 +997,10 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO ...@@ -986,10 +997,10 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
pBeforeFillData->num = 0; pBeforeFillData->num = 0;
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
taosTFree(pResPages[i]); tfree(pResPages[i]);
} }
taosTFree(pResPages); tfree(pResPages);
} }
static void savePreviousRow(SLocalReducer *pLocalReducer, tFilePage *tmpBuffer) { static void savePreviousRow(SLocalReducer *pLocalReducer, tFilePage *tmpBuffer) {
...@@ -1072,7 +1083,7 @@ static int64_t getNumOfResultLocal(SQueryInfo *pQueryInfo, SQLFunctionCtx *pCtx) ...@@ -1072,7 +1083,7 @@ static int64_t getNumOfResultLocal(SQueryInfo *pQueryInfo, SQLFunctionCtx *pCtx)
continue; continue;
} }
SResultInfo* pResInfo = GET_RES_INFO(&pCtx[j]); SResultRowCellInfo* pResInfo = GET_RES_INFO(&pCtx[j]);
if (maxOutput < pResInfo->numOfRes) { if (maxOutput < pResInfo->numOfRes) {
maxOutput = pResInfo->numOfRes; maxOutput = pResInfo->numOfRes;
} }
...@@ -1230,6 +1241,10 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur ...@@ -1230,6 +1241,10 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur
tColModelCompact(pModel, pResBuf, pModel->capacity); tColModelCompact(pModel, pResBuf, pModel->capacity);
if (tscIsSecondStageQuery(pQueryInfo)) {
doArithmeticCalculate(pQueryInfo, pResBuf, pModel->rowSize, pLocalReducer->finalRowSize);
}
#ifdef _DEBUG_VIEW #ifdef _DEBUG_VIEW
printf("final result before interpo:\n"); printf("final result before interpo:\n");
// tColModelDisplay(pLocalReducer->resColModel, pLocalReducer->pBufForInterpo, pResBuf->num, pResBuf->num); // tColModelDisplay(pLocalReducer->resColModel, pLocalReducer->pBufForInterpo, pResBuf->num, pResBuf->num);
...@@ -1253,10 +1268,11 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur ...@@ -1253,10 +1268,11 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur
return true; return true;
} }
void resetOutputBuf(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) { // reset output buffer to the beginning void resetOutputBuf(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) {// reset output buffer to the beginning
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { size_t t = tscSqlExprNumOfExprs(pQueryInfo);
pLocalReducer->pCtx[i].aOutputBuf = for (int32_t i = 0; i < t; ++i) {
pLocalReducer->pResultBuf->data + tscFieldInfoGetOffset(pQueryInfo, i) * pLocalReducer->resColModel->capacity; SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
pLocalReducer->pCtx[i].aOutputBuf = pLocalReducer->pResultBuf->data + pExpr->offset * pLocalReducer->resColModel->capacity;
} }
memset(pLocalReducer->pResultBuf, 0, pLocalReducer->nResultBufSize + sizeof(tFilePage)); memset(pLocalReducer->pResultBuf, 0, pLocalReducer->nResultBufSize + sizeof(tFilePage));
...@@ -1301,7 +1317,7 @@ static bool doBuildFilledResultForGroup(SSqlObj *pSql) { ...@@ -1301,7 +1317,7 @@ static bool doBuildFilledResultForGroup(SSqlObj *pSql) {
int64_t etime = *(int64_t *)(pFinalDataBuf->data + TSDB_KEYSIZE * (pFillInfo->numOfRows - 1)); int64_t etime = *(int64_t *)(pFinalDataBuf->data + TSDB_KEYSIZE * (pFillInfo->numOfRows - 1));
// the first column must be the timestamp column // the first column must be the timestamp column
int32_t rows = (int32_t)getFilledNumOfRes(pFillInfo, etime, pLocalReducer->resColModel->capacity); int32_t rows = (int32_t) getNumOfResWithFill(pFillInfo, etime, pLocalReducer->resColModel->capacity);
if (rows > 0) { // do fill gap if (rows > 0) { // do fill gap
doFillResult(pSql, pLocalReducer, false); doFillResult(pSql, pLocalReducer, false);
} }
...@@ -1330,7 +1346,7 @@ static bool doHandleLastRemainData(SSqlObj *pSql) { ...@@ -1330,7 +1346,7 @@ static bool doHandleLastRemainData(SSqlObj *pSql) {
((pRes->numOfRowsGroup < pQueryInfo->limit.limit && pQueryInfo->limit.limit > 0) || (pQueryInfo->limit.limit < 0))) { ((pRes->numOfRowsGroup < pQueryInfo->limit.limit && pQueryInfo->limit.limit > 0) || (pQueryInfo->limit.limit < 0))) {
int64_t etime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.ekey : pQueryInfo->window.skey; int64_t etime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.ekey : pQueryInfo->window.skey;
int32_t rows = (int32_t)getFilledNumOfRes(pFillInfo, etime, pLocalReducer->resColModel->capacity); int32_t rows = (int32_t)getNumOfResWithFill(pFillInfo, etime, pLocalReducer->resColModel->capacity);
if (rows > 0) { if (rows > 0) {
doFillResult(pSql, pLocalReducer, true); doFillResult(pSql, pLocalReducer, true);
} }
...@@ -1501,8 +1517,7 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) { ...@@ -1501,8 +1517,7 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
if (pLocalReducer->discard && sameGroup) { if (pLocalReducer->discard && sameGroup) {
pLocalReducer->hasUnprocessedRow = false; pLocalReducer->hasUnprocessedRow = false;
tmpBuffer->num = 0; tmpBuffer->num = 0;
} else { } else { // current row does not belongs to the previous group, so it is not be handled yet.
// current row does not belongs to the previous group, so it is not be handled yet.
pLocalReducer->hasUnprocessedRow = true; pLocalReducer->hasUnprocessedRow = true;
} }
...@@ -1596,3 +1611,44 @@ void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen) ...@@ -1596,3 +1611,44 @@ void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen)
pRes->pLocalReducer->pResultBuf->num = numOfRes; pRes->pLocalReducer->pResultBuf->num = numOfRes;
pRes->data = pRes->pLocalReducer->pResultBuf->data; pRes->data = pRes->pLocalReducer->pResultBuf->data;
} }
void doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize) {
char* pbuf = calloc(1, pOutput->num * rowSize);
size_t size = tscNumOfFields(pQueryInfo);
SArithmeticSupport arithSup = {0};
// todo refactor
arithSup.offset = 0;
arithSup.numOfCols = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
arithSup.exprList = pQueryInfo->exprList;
arithSup.data = calloc(arithSup.numOfCols, POINTER_BYTES);
for(int32_t k = 0; k < arithSup.numOfCols; ++k) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, k);
arithSup.data[k] = (pOutput->data + pOutput->num* pExpr->offset);
}
int32_t offset = 0;
for (int i = 0; i < size; ++i) {
SInternalField* pSup = TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i);
// calculate the result from several other columns
if (pSup->pArithExprInfo != NULL) {
arithSup.pArithExpr = pSup->pArithExprInfo;
tExprTreeCalcTraverse(arithSup.pArithExpr->pExpr, (int32_t) pOutput->num, pbuf + pOutput->num*offset, &arithSup, TSDB_ORDER_ASC, getArithemicInputSrc);
} else {
SSqlExpr* pExpr = pSup->pSqlExpr;
memcpy(pbuf + pOutput->num * offset, pExpr->offset * pOutput->num + pOutput->data, pExpr->resBytes * pOutput->num);
}
offset += pSup->field.bytes;
}
assert(finalRowSize <= rowSize);
memcpy(pOutput->data, pbuf, pOutput->num * finalRowSize);
tfree(pbuf);
tfree(arithSup.data);
}
\ No newline at end of file
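The new `doArithmeticCalculate` above evaluates second-stage arithmetic expressions column by column into a scratch buffer and then copies the compacted rows back. A stripped-down sketch of the column-major indexing it relies on (all names and the `a + b` expression are hypothetical):

```c
#include <stdint.h>
#include <stdio.h>

/* Columns are stored column-major: all rows of column 0, then all rows of
 * column 1, and so on -- the same layout doArithmeticCalculate assumes when
 * it indexes pOutput->data + pOutput->num * offset. */
static void addColumns(const char *page, int32_t numOfRows,
                       int32_t offsetA, int32_t offsetB,
                       char *out, int32_t offsetOut) {
  const int32_t *a = (const int32_t *)(page + (size_t)numOfRows * offsetA);
  const int32_t *b = (const int32_t *)(page + (size_t)numOfRows * offsetB);
  int32_t       *c = (int32_t *)(out + (size_t)numOfRows * offsetOut);
  for (int32_t r = 0; r < numOfRows; ++r) {
    c[r] = a[r] + b[r];
  }
}

int main(void) {
  enum { ROWS = 3 };
  int32_t page[2 * ROWS] = {1, 2, 3, 10, 20, 30};  /* column a, then column b */
  int32_t outCol[ROWS];
  addColumns((const char *)page, ROWS, 0, sizeof(int32_t) * ROWS,
             (char *)outCol, 0);
  for (int r = 0; r < ROWS; ++r) printf("%d ", outCol[r]);  /* 11 22 33 */
  printf("\n");
  return 0;
}
```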
...@@ -702,7 +702,7 @@ static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableList, char **st ...@@ -702,7 +702,7 @@ static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableList, char **st
} }
int32_t code = TSDB_CODE_TSC_INVALID_SQL; int32_t code = TSDB_CODE_TSC_INVALID_SQL;
char * tmpTokenBuf = calloc(1, 4096); // used for deleting Escape character: \\, \', \" char * tmpTokenBuf = calloc(1, 16*1024); // used for deleting Escape character: \\, \', \"
if (NULL == tmpTokenBuf) { if (NULL == tmpTokenBuf) {
return TSDB_CODE_TSC_OUT_OF_MEMORY; return TSDB_CODE_TSC_OUT_OF_MEMORY;
} }
...@@ -1148,6 +1148,10 @@ int tsParseInsertSql(SSqlObj *pSql) { ...@@ -1148,6 +1148,10 @@ int tsParseInsertSql(SSqlObj *pSql) {
index = 0; index = 0;
sToken = tStrGetToken(str, &index, false, 0, NULL); sToken = tStrGetToken(str, &index, false, 0, NULL);
if (sToken.type != TK_STRING && sToken.type != TK_ID) {
code = tscInvalidSQLErrMsg(pCmd->payload, "file path is required following keyword FILE", sToken.z);
goto _error;
}
str += index; str += index;
if (sToken.n == 0) { if (sToken.n == 0) {
code = tscInvalidSQLErrMsg(pCmd->payload, "file path is required following keyword FILE", sToken.z); code = tscInvalidSQLErrMsg(pCmd->payload, "file path is required following keyword FILE", sToken.z);
...@@ -1406,7 +1410,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) { ...@@ -1406,7 +1410,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
assert(taos_errno(pSql) == code); assert(taos_errno(pSql) == code);
taos_free_result(pSql); taos_free_result(pSql);
taosTFree(pSupporter); tfree(pSupporter);
fclose(fp); fclose(fp);
pParentSql->res.code = code; pParentSql->res.code = code;
...@@ -1445,7 +1449,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) { ...@@ -1445,7 +1449,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
char *tokenBuf = calloc(1, 4096); char *tokenBuf = calloc(1, 4096);
while ((readLen = taosGetline(&line, &n, fp)) != -1) { while ((readLen = tgetline(&line, &n, fp)) != -1) {
if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) {
line[--readLen] = 0; line[--readLen] = 0;
} }
...@@ -1470,7 +1474,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) { ...@@ -1470,7 +1474,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
} }
} }
taosTFree(tokenBuf); tfree(tokenBuf);
free(line); free(line);
if (count > 0) { if (count > 0) {
...@@ -1483,7 +1487,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) { ...@@ -1483,7 +1487,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
} else { } else {
taos_free_result(pSql); taos_free_result(pSql);
taosTFree(pSupporter); tfree(pSupporter);
fclose(fp); fclose(fp);
pParentSql->fp = pParentSql->fetchFp; pParentSql->fp = pParentSql->fetchFp;
...@@ -1513,7 +1517,7 @@ void tscProcessMultiVnodesImportFromFile(SSqlObj *pSql) { ...@@ -1513,7 +1517,7 @@ void tscProcessMultiVnodesImportFromFile(SSqlObj *pSql) {
pSql->res.code = TAOS_SYSTEM_ERROR(errno); pSql->res.code = TAOS_SYSTEM_ERROR(errno);
tscError("%p failed to open file %s to load data from file, code:%s", pSql, pCmd->payload, tstrerror(pSql->res.code)); tscError("%p failed to open file %s to load data from file, code:%s", pSql, pCmd->payload, tstrerror(pSql->res.code));
taosTFree(pSupporter) tfree(pSupporter)
tscQueueAsyncRes(pSql); tscQueueAsyncRes(pSql);
return; return;
......
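In the insert path above, `tmpTokenBuf` grows from 4 KB to 16 KB; per the original comment it is "used for deleting Escape character: \\, \', \"". A simplified stand-in for that kind of un-escaping into a scratch buffer, independent of TDengine's real tokenizer:

```c
#include <stdio.h>
#include <string.h>

/* Copy 'src' (len bytes) into 'dst', dropping the backslash in the escape
 * sequences \\  \'  \"  -- a simplified stand-in for the real token cleanup. */
static size_t stripEscapes(const char *src, size_t len, char *dst) {
  size_t j = 0;
  for (size_t i = 0; i < len; ++i) {
    if (src[i] == '\\' && i + 1 < len &&
        (src[i + 1] == '\\' || src[i + 1] == '\'' || src[i + 1] == '"')) {
      dst[j++] = src[++i];   /* keep only the escaped character */
    } else {
      dst[j++] = src[i];
    }
  }
  dst[j] = '\0';
  return j;
}

int main(void) {
  char buf[64];
  const char *tok = "it\\'s a \\\"quoted\\\" value";
  stripEscapes(tok, strlen(tok), buf);
  printf("%s\n", buf);   /* it's a "quoted" value */
  return 0;
}
```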
...@@ -222,7 +222,7 @@ void tscKillStream(STscObj *pObj, uint32_t killId) { ...@@ -222,7 +222,7 @@ void tscKillStream(STscObj *pObj, uint32_t killId) {
} }
int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) { int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
SCMHeartBeatMsg *pHeartbeat = pMsg; SHeartBeatMsg *pHeartbeat = pMsg;
int allocedQueriesNum = pHeartbeat->numOfQueries; int allocedQueriesNum = pHeartbeat->numOfQueries;
int allocedStreamsNum = pHeartbeat->numOfStreams; int allocedStreamsNum = pHeartbeat->numOfStreams;
...@@ -277,7 +277,7 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) { ...@@ -277,7 +277,7 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
} }
int32_t msgLen = pHeartbeat->numOfQueries * sizeof(SQueryDesc) + pHeartbeat->numOfStreams * sizeof(SStreamDesc) + int32_t msgLen = pHeartbeat->numOfQueries * sizeof(SQueryDesc) + pHeartbeat->numOfStreams * sizeof(SStreamDesc) +
sizeof(SCMHeartBeatMsg); sizeof(SHeartBeatMsg);
pHeartbeat->connId = htonl(pObj->connId); pHeartbeat->connId = htonl(pObj->connId);
pHeartbeat->numOfQueries = htonl(pHeartbeat->numOfQueries); pHeartbeat->numOfQueries = htonl(pHeartbeat->numOfQueries);
pHeartbeat->numOfStreams = htonl(pHeartbeat->numOfStreams); pHeartbeat->numOfStreams = htonl(pHeartbeat->numOfStreams);
......
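The heartbeat hunk above sizes the outgoing message as a fixed header plus `numOfQueries * sizeof(SQueryDesc) + numOfStreams * sizeof(SStreamDesc)` and stores the counts with `htonl`. A hedged sketch of that variable-length layout; the struct shapes and field names here are invented for illustration:

```c
#include <arpa/inet.h>   /* htonl */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Invented, simplified message shapes -- not TDengine's real structs. */
typedef struct { uint64_t queryId;  uint64_t useconds;  } QueryDesc;
typedef struct { uint64_t streamId; uint64_t numOfRows; } StreamDesc;
typedef struct {
  uint32_t connId;
  uint32_t numOfQueries;   /* sent in network byte order */
  uint32_t numOfStreams;   /* sent in network byte order */
  char     payload[];      /* QueryDesc[] followed by StreamDesc[] */
} HeartBeatMsg;

static HeartBeatMsg *buildHeartbeat(uint32_t connId,
                                    const QueryDesc *q, uint32_t nq,
                                    const StreamDesc *s, uint32_t ns,
                                    size_t *msgLen) {
  *msgLen = sizeof(HeartBeatMsg) + nq * sizeof(QueryDesc) + ns * sizeof(StreamDesc);
  HeartBeatMsg *msg = calloc(1, *msgLen);
  if (msg == NULL) return NULL;

  if (nq) memcpy(msg->payload, q, nq * sizeof(QueryDesc));
  if (ns) memcpy(msg->payload + nq * sizeof(QueryDesc), s, ns * sizeof(StreamDesc));

  /* counts cross the wire, so store them in network byte order */
  msg->connId       = htonl(connId);
  msg->numOfQueries = htonl(nq);
  msg->numOfStreams = htonl(ns);
  return msg;
}
```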
This diff has been collapsed.
...@@ -118,7 +118,7 @@ SSchema* tscGetTableColumnSchema(const STableMeta* pTableMeta, int32_t colIndex) ...@@ -118,7 +118,7 @@ SSchema* tscGetTableColumnSchema(const STableMeta* pTableMeta, int32_t colIndex)
} }
// TODO for large number of columns, employ the binary search method // TODO for large number of columns, employ the binary search method
SSchema* tscGetTableColumnSchemaById(STableMeta* pTableMeta, int16_t colId) { SSchema* tscGetColumnSchemaById(STableMeta* pTableMeta, int16_t colId) {
STableComInfo tinfo = tscGetTableInfo(pTableMeta); STableComInfo tinfo = tscGetTableInfo(pTableMeta);
for(int32_t i = 0; i < tinfo.numOfColumns + tinfo.numOfTags; ++i) { for(int32_t i = 0; i < tinfo.numOfColumns + tinfo.numOfTags; ++i) {
...@@ -130,17 +130,7 @@ SSchema* tscGetTableColumnSchemaById(STableMeta* pTableMeta, int16_t colId) { ...@@ -130,17 +130,7 @@ SSchema* tscGetTableColumnSchemaById(STableMeta* pTableMeta, int16_t colId) {
return NULL; return NULL;
} }
struct SSchema tscGetTbnameColumnSchema() { static void tscInitCorVgroupInfo(SCorVgroupInfo *corVgroupInfo, SVgroupInfo *vgroupInfo) {
struct SSchema s = {
.colId = TSDB_TBNAME_COLUMN_INDEX,
.type = TSDB_DATA_TYPE_BINARY,
.bytes = TSDB_TABLE_NAME_LEN
};
strcpy(s.name, TSQL_TBNAME_L);
return s;
}
static void tscInitCorVgroupInfo(SCMCorVgroupInfo *corVgroupInfo, SCMVgroupInfo *vgroupInfo) {
corVgroupInfo->version = 0; corVgroupInfo->version = 0;
corVgroupInfo->inUse = 0; corVgroupInfo->inUse = 0;
corVgroupInfo->numOfEps = vgroupInfo->numOfEps; corVgroupInfo->numOfEps = vgroupInfo->numOfEps;
...@@ -166,7 +156,7 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size ...@@ -166,7 +156,7 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size
pTableMeta->id.tid = pTableMetaMsg->tid; pTableMeta->id.tid = pTableMetaMsg->tid;
pTableMeta->id.uid = pTableMetaMsg->uid; pTableMeta->id.uid = pTableMetaMsg->uid;
SCMVgroupInfo* pVgroupInfo = &pTableMeta->vgroupInfo; SVgroupInfo* pVgroupInfo = &pTableMeta->vgroupInfo;
pVgroupInfo->numOfEps = pTableMetaMsg->vgroup.numOfEps; pVgroupInfo->numOfEps = pTableMetaMsg->vgroup.numOfEps;
pVgroupInfo->vgId = pTableMetaMsg->vgroup.vgId; pVgroupInfo->vgId = pTableMetaMsg->vgroup.vgId;
...@@ -197,28 +187,6 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size ...@@ -197,28 +187,6 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size
return pTableMeta; return pTableMeta;
} }
/**
* the TableMeta data format in memory is as follows:
*
* +--------------------+
* |STableMeta Body data| sizeof(STableMeta)
* +--------------------+
* |Schema data | numOfTotalColumns * sizeof(SSchema)
* +--------------------+
* |Tags data | tag_col_1.bytes + tag_col_2.bytes + ....
* +--------------------+
*
* @param pTableMeta
* @return
*/
char* tsGetTagsValue(STableMeta* pTableMeta) {
int32_t offset = 0;
// int32_t numOfTotalCols = pTableMeta->numOfColumns + pTableMeta->numOfTags;
// uint32_t offset = sizeof(STableMeta) + numOfTotalCols * sizeof(SSchema);
return ((char*)pTableMeta + offset);
}
// todo refactor // todo refactor
UNUSED_FUNC static FORCE_INLINE char* skipSegments(char* input, char delim, int32_t num) { UNUSED_FUNC static FORCE_INLINE char* skipSegments(char* input, char delim, int32_t num) {
for (int32_t i = 0; i < num; ++i) { for (int32_t i = 0; i < num; ++i) {
......
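`tscInitCorVgroupInfo` above seeds the "corrected" vgroup copy with `version = 0`, `inUse = 0`, and the endpoint list from the cached `SVgroupInfo`. One plausible use of such a structure, sketched here with made-up sizes and a made-up helper, is to rotate `inUse` to the next replica when the current endpoint fails:

```c
#include <stdint.h>
#include <stdio.h>

#define MAX_REPLICA 3   /* assumed; stands in for TSDB_MAX_REPLICA */

typedef struct {
  int32_t  version;              /* bumped when the endpoint list is refreshed */
  int8_t   inUse;                /* index of the endpoint currently in use */
  int8_t   numOfEps;
  char     fqdn[MAX_REPLICA][32];
  uint16_t port[MAX_REPLICA];
} CorVgroupInfo;

/* Called after a send to endpoint [inUse] fails: move on to the next replica. */
static void rotateEndpoint(CorVgroupInfo *info) {
  if (info->numOfEps > 0) {
    info->inUse = (int8_t)((info->inUse + 1) % info->numOfEps);
  }
}

int main(void) {
  CorVgroupInfo v = {.version = 0, .inUse = 0, .numOfEps = 3};
  rotateEndpoint(&v);
  printf("now using replica %d of %d\n", v.inUse, v.numOfEps);  /* 1 of 3 */
  return 0;
}
```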
This diff has been collapsed.
@@ -28,7 +28,6 @@
 #include "tutil.h"
 #include "ttimer.h"
 #include "tscProfile.h"
-#include "ttimer.h"
 
 static bool validImpl(const char* str, size_t maxsize) {
   if (str == NULL) {
@@ -161,6 +160,7 @@ static SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pa
   registerSqlObj(pSql);
   tsInsertHeadSize = sizeof(SMsgDesc) + sizeof(SSubmitMsg);
+  pObj->rid = taosAddRef(tscRefId, pObj);
 
   return pSql;
 }
@@ -278,9 +278,9 @@ void taos_close(TAOS *taos) {
   SSqlObj* pHb = pObj->pHb;
   if (pHb != NULL && atomic_val_compare_exchange_ptr(&pObj->pHb, pHb, 0) == pHb) {
-    if (pHb->pRpcCtx != NULL) {  // wait for rsp from dnode
-      rpcCancelRequest(pHb->pRpcCtx);
-      pHb->pRpcCtx = NULL;
+    if (pHb->rpcRid > 0) {  // wait for rsp from dnode
+      rpcCancelRequest(pHb->rpcRid);
+      pHb->rpcRid = -1;
     }
 
     tscDebug("%p HB is freed", pHb);
@@ -296,7 +296,8 @@ void taos_close(TAOS *taos) {
   }
 
   tscDebug("%p all sqlObj are freed, free tscObj and close dnodeConn:%p", pObj, pObj->pDnodeConn);
-  tscCloseTscObj(pObj);
+  taosRemoveRef(tscRefId, pObj->rid);
 }
 
 void waitForQueryRsp(void *param, TAOS_RES *tres, int code) {
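The hunks above move the client connection to reference-counted lifetime management: taosConnectImpl registers the connection with taosAddRef and stores the returned id in pObj->rid, and taos_close releases it with taosRemoveRef instead of calling tscCloseTscObj directly (the destructor is installed once via taosOpenRef, shown in the tscSystem.c diff further down). A minimal, self-contained sketch of that open/add/remove pattern; the ref_* names and the single-entry registry are hypothetical, not the real taosRef API:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical one-entry "reference registry": a destructor is registered
     * once, objects are added and later removed by id, and the destructor runs
     * when the last reference is dropped. */
    typedef void (*ref_fp)(void *obj);

    typedef struct { void *obj; int count; long id; } RefEntry;

    static ref_fp   g_destructor = NULL;
    static RefEntry g_entry      = {0};

    long ref_open(ref_fp destructor) {   /* cf. taosOpenRef */
      g_destructor = destructor;
      return 0;                          /* registry handle */
    }

    long ref_add(void *obj) {            /* cf. taosAddRef */
      g_entry.obj   = obj;
      g_entry.count = 1;
      g_entry.id    = 1;
      return g_entry.id;
    }

    void ref_remove(long id) {           /* cf. taosRemoveRef */
      if (id == g_entry.id && --g_entry.count == 0) {
        g_destructor(g_entry.obj);       /* e.g. tscCloseTscObj */
        g_entry.obj = NULL;
      }
    }

    typedef struct { long rid; } TscObj;

    static void closeTscObj(void *p) { printf("tsc obj %p destroyed\n", p); free(p); }

    int main(void) {
      ref_open(closeTscObj);             /* once, at client init */
      TscObj *pObj = calloc(1, sizeof(TscObj));
      pObj->rid = ref_add(pObj);         /* in taosConnectImpl   */
      ref_remove(pObj->rid);             /* in taos_close        */
      return 0;
    }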
@@ -480,7 +481,7 @@ int taos_fetch_block_impl(TAOS_RES *res, TAOS_ROW *rows) {
   assert(0);
   for (int i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
-    tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i);
+    tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i, 0);
   }
 
   *rows = pRes->tsrow;
@@ -564,7 +565,7 @@ int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) {
   pRes->rspType = 0;
   pSql->subState.numOfSub = 0;
-  taosTFree(pSql->pSubs);
+  tfree(pSql->pSubs);
 
   assert(pSql->fp == NULL);
@@ -746,9 +747,9 @@ static void tscKillSTableQuery(SSqlObj *pSql) {
   assert(pSubObj->self == (SSqlObj**) p);
 
   pSubObj->res.code = TSDB_CODE_TSC_QUERY_CANCELLED;
-  if (pSubObj->pRpcCtx != NULL) {
-    rpcCancelRequest(pSubObj->pRpcCtx);
-    pSubObj->pRpcCtx = NULL;
+  if (pSubObj->rpcRid > 0) {
+    rpcCancelRequest(pSubObj->rpcRid);
+    pSubObj->rpcRid = -1;
   }
 
   tscQueueAsyncRes(pSubObj);
@@ -773,7 +774,7 @@ void taos_stop_query(TAOS_RES *res) {
   SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
   if (tscIsTwoStageSTableQuery(pQueryInfo, 0)) {
-    assert(pSql->pRpcCtx == NULL);
+    assert(pSql->rpcRid <= 0);
     tscKillSTableQuery(pSql);
   } else {
     if (pSql->cmd.command < TSDB_SQL_LOCAL) {
@@ -782,9 +783,9 @@ void taos_stop_query(TAOS_RES *res) {
      * reset and freed in the processMsgFromServer function, and causes the invalid
      * write problem for rpcCancelRequest.
      */
-    if (pSql->pRpcCtx != NULL) {
-      rpcCancelRequest(pSql->pRpcCtx);
-      pSql->pRpcCtx = NULL;
+    if (pSql->rpcRid > 0) {
+      rpcCancelRequest(pSql->rpcRid);
+      pSql->rpcRid = -1;
     }
 
     tscQueueAsyncRes(pSql);
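These hunks replace the raw pRpcCtx pointer with an integer rpcRid handle. The comment retained above explains why: the rpc context can be reset and freed in processMsgFromServer while a cancel is in flight, so cancelling through a stored pointer risks an invalid write, whereas a stale id simply fails the lookup. A minimal sketch of cancel-by-id; the rpcCtxAcquire/rpcCtxRelease/rpcCancelByRid names and the lookup table are hypothetical, not the rpc module's real API:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_CTX 16

    /* id -> context table; an id stays safe to look up even after the
     * context has been released, which is the point of cancelling by id. */
    typedef struct { int64_t rid; int inUse; } SRpcCtx;
    static SRpcCtx g_ctx[MAX_CTX];
    static int64_t g_nextRid = 1;

    int64_t rpcCtxAcquire(void) {              /* returns an rid > 0 */
      for (int i = 0; i < MAX_CTX; ++i) {
        if (!g_ctx[i].inUse) {
          g_ctx[i].inUse = 1;
          g_ctx[i].rid   = g_nextRid++;
          return g_ctx[i].rid;
        }
      }
      return -1;
    }

    void rpcCtxRelease(int64_t rid) {          /* response arrived: free the slot */
      for (int i = 0; i < MAX_CTX; ++i) {
        if (g_ctx[i].inUse && g_ctx[i].rid == rid) {
          memset(&g_ctx[i], 0, sizeof(SRpcCtx));
          return;
        }
      }
    }

    void rpcCancelByRid(int64_t rid) {         /* cf. rpcCancelRequest(rpcRid) */
      for (int i = 0; i < MAX_CTX; ++i) {
        if (g_ctx[i].inUse && g_ctx[i].rid == rid) {
          printf("cancelled request %lld\n", (long long)rid);
          return;
        }
      }
      /* stale rid: request already finished, cancel is a harmless no-op */
    }

    int main(void) {
      int64_t rid = rpcCtxAcquire();
      rpcCtxRelease(rid);                      /* response won the race  */
      rpcCancelByRid(rid);                     /* late cancel stays safe */
      return 0;
    }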
@@ -892,7 +893,7 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
   if (sqlLen > tsMaxSQLStringLen) {
     tscError("%p sql too long", pSql);
     pRes->code = TSDB_CODE_TSC_INVALID_SQL;
-    taosTFree(pSql);
+    tfree(pSql);
     return pRes->code;
   }
@@ -901,7 +902,7 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
     pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
     tscError("%p failed to malloc sql string buffer", pSql);
     tscDebug("%p Valid SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(pSql), pObj);
-    taosTFree(pSql);
+    tfree(pSql);
     return pRes->code;
   }
...
@@ -273,7 +273,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
     taosCacheRelease(tscMetaCache, (void**)&(pTableMetaInfo->pTableMeta), false);
 
     tscFreeSqlResult(pSql);
-    taosTFree(pSql->pSubs);
+    tfree(pSql->pSubs);
     pSql->subState.numOfSub = 0;
     pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList);
     tscSetNextLaunchTimer(pStream, pSql);
@@ -617,6 +617,6 @@ void taos_close_stream(TAOS_STREAM *handle) {
     pStream->pSql = NULL;
     taos_free_result(pSql);
 
-    taosTFree(pStream);
+    tfree(pStream);
   }
 }
@@ -35,6 +35,7 @@ void * tscTmr;
 void * tscQhandle;
 void * tscCheckDiskUsageTmr;
 int tsInsertHeadSize;
+int tscRefId = -1;
 
 int tscNumOfThreads;
@@ -78,7 +79,7 @@ int32_t tscInitRpc(const char *user, const char *secretEncrypt, void **pDnodeCon
 void taos_init_imp(void) {
-  char temp[128];
+  char temp[128] = {0};
 
   errno = TSDB_CODE_SUCCESS;
   srand(taosGetTimestampSec());
@@ -102,8 +103,8 @@ void taos_init_imp(void) {
     taosReadGlobalCfg();
     taosCheckGlobalCfg();
-    taosPrintGlobalCfg();
+    rpcInit();
 
     tscDebug("starting to initialize TAOS client ...");
     tscDebug("Local End Point is:%s", tsLocalEp);
   }
@@ -145,29 +146,45 @@ void taos_init_imp(void) {
     tscObjCache = taosCacheInit(TSDB_CACHE_PTR_KEY, refreshTime / 2, false, tscFreeRegisteredSqlObj, "sqlObj");
   }
 
+  tscRefId = taosOpenRef(200, tscCloseTscObj);
+
+  // in other language APIs, taos_cleanup is not available yet.
+  // So, to make sure taos_cleanup will be invoked to clean up the allocated
+  // resource to suppress the valgrind warning.
+  atexit(taos_cleanup);
+
   tscDebug("client is initialized successfully");
 }
 
 void taos_init() { pthread_once(&tscinit, taos_init_imp); }
 
-void taos_cleanup() {
-  if (tscMetaCache != NULL) {
-    taosCacheCleanup(tscMetaCache);
-    tscMetaCache = NULL;
+// this function may be called by user or system, or by both simultaneously.
+void taos_cleanup(void) {
+  tscDebug("start to cleanup client environment");
+
+  void* m = tscMetaCache;
+  if (m != NULL && atomic_val_compare_exchange_ptr(&tscMetaCache, m, 0) == m) {
+    taosCacheCleanup(m);
+  }
 
-    taosCacheCleanup(tscObjCache);
-    tscObjCache = NULL;
+  m = tscObjCache;
+  if (m != NULL && atomic_val_compare_exchange_ptr(&tscObjCache, m, 0) == m) {
+    taosCacheCleanup(m);
   }
 
-  if (tscQhandle != NULL) {
-    taosCleanUpScheduler(tscQhandle);
-    tscQhandle = NULL;
+  m = tscQhandle;
+  if (m != NULL && atomic_val_compare_exchange_ptr(&tscQhandle, m, 0) == m) {
+    taosCleanUpScheduler(m);
  }
 
+  taosCloseRef(tscRefId);
+
   taosCleanupKeywordsTable();
   taosCloseLog();
+  if (tscEmbedded == 0) rpcCleanup();
 
-  taosTmrCleanUp(tscTmr);
+  m = tscTmr;
+  if (m != NULL && atomic_val_compare_exchange_ptr(&tscTmr, m, 0) == m) {
+    taosTmrCleanUp(m);
+  }
 }
 
 static int taos_options_imp(TSDB_OPTION option, const char *pStr) {
...
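The rewritten taos_cleanup can be entered both from the atexit hook registered in taos_init_imp and from an explicit user call, so every global is claimed with an atomic compare-and-swap before teardown: whichever caller swaps the pointer to NULL performs the cleanup, the other sees NULL or loses the CAS and skips it. A minimal sketch of that idempotent-cleanup pattern, using C11 atomics instead of the project's atomic_val_compare_exchange_ptr wrapper:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Globals that must be cleaned up exactly once, even if cleanup()
     * runs twice (once via atexit, once via a direct call). */
    static _Atomic(void *) g_cache = NULL;
    static _Atomic(void *) g_sched = NULL;

    static void cache_cleanup(void *p) { printf("cache %p cleaned\n", p); free(p); }
    static void sched_cleanup(void *p) { printf("scheduler %p cleaned\n", p); free(p); }

    void cleanup(void) {
      /* Claim each resource with a CAS: only the caller that swaps the
       * non-NULL pointer to NULL actually releases it. */
      void *m = atomic_load(&g_cache);
      if (m != NULL && atomic_compare_exchange_strong(&g_cache, &m, NULL)) {
        cache_cleanup(m);
      }

      m = atomic_load(&g_sched);
      if (m != NULL && atomic_compare_exchange_strong(&g_sched, &m, NULL)) {
        sched_cleanup(m);
      }
    }

    int main(void) {
      atomic_store(&g_cache, malloc(16));
      atomic_store(&g_sched, malloc(16));

      atexit(cleanup);   /* system-triggered path                      */
      cleanup();         /* user-triggered path; second run is a no-op */
      return 0;
    }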
@@ -44,6 +44,7 @@ extern int32_t tsMaxShellConns;
 extern int32_t tsShellActivityTimer;
 extern uint32_t tsMaxTmrCtrl;
 extern float tsNumOfThreadsPerCore;
+extern int32_t tsNumOfCommitThreads;
 extern float tsRatioOfQueryThreads;  // todo remove it
 extern int8_t tsDaylight;
 extern char tsTimezone[];
@@ -51,6 +52,7 @@ extern char tsLocale[];
 extern char tsCharset[];  // default encode string
 extern int32_t tsEnableCoreFile;
 extern int32_t tsCompressMsgSize;
+extern char tsTempDir[];
 
 //query buffer management
 extern int32_t tsQueryBufferSize;  // maximum allowed usage buffer for each data node during query processing
@@ -87,6 +89,7 @@ extern int16_t tsWAL;
 extern int32_t tsFsyncPeriod;
 extern int32_t tsReplications;
 extern int32_t tsQuorum;
+extern int32_t tsUpdate;
 
 // balance
 extern int32_t tsEnableBalance;
...
@@ -35,6 +35,6 @@ bool tscValidateTableNameLength(size_t len);
 SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters);
 
-// int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision);
+SSchema tscGetTbnameColumnSchema();
 
 #endif  // TDENGINE_NAME_H
@@ -188,3 +188,14 @@ void extractTableNameFromToken(SStrToken* pToken, SStrToken* pTable) {
     pToken->z = r;
   }
 }
+
+SSchema tscGetTbnameColumnSchema() {
+  struct SSchema s = {
+    .colId = TSDB_TBNAME_COLUMN_INDEX,
+    .type  = TSDB_DATA_TYPE_BINARY,
+    .bytes = TSDB_TABLE_NAME_LEN
+  };
+
+  strcpy(s.name, TSQL_TBNAME_L);
+  return s;
+}
\ No newline at end of file
@@ -108,7 +108,7 @@ void tVariantCreateFromBinary(tVariant *pVar, const char *pz, size_t len, uint32
       break;
     }
     case TSDB_DATA_TYPE_BINARY: {  // todo refactor, extract a method
-      pVar->pz = calloc(len, sizeof(char));
+      pVar->pz = calloc(len + 1, sizeof(char));
       memcpy(pVar->pz, pz, len);
       pVar->nLen = (int32_t)len;
       break;
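The extra byte in calloc(len + 1, sizeof(char)) keeps the copied BINARY payload NUL-terminated: calloc zeroes the buffer, so after the memcpy of len bytes there is always a trailing '\0', and downstream code that treats pVar->pz as a C string cannot read past the allocation. A standalone illustration of the same idea (not TDengine code):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Copy a length-delimited payload into a fresh buffer. Allocating
     * len + 1 zeroed bytes leaves a '\0' after the copy, so the result is
     * safe for strlen/printf-style consumers. */
    char *copy_payload(const char *pz, size_t len) {
      char *buf = calloc(len + 1, sizeof(char));  /* len alone would drop the terminator */
      if (buf == NULL) return NULL;
      memcpy(buf, pz, len);
      return buf;
    }

    int main(void) {
      const char raw[3] = {'a', 'b', 'c'};        /* not NUL-terminated */
      char *s = copy_payload(raw, sizeof(raw));
      printf("%zu bytes: %s\n", strlen(s), s);    /* prints "3 bytes: abc" */
      free(s);
      return 0;
    }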
@@ -125,7 +125,7 @@ void tVariantDestroy(tVariant *pVar) {
   if (pVar == NULL) return;
 
   if (pVar->nType == TSDB_DATA_TYPE_BINARY || pVar->nType == TSDB_DATA_TYPE_NCHAR) {
-    taosTFree(pVar->pz);
+    tfree(pVar->pz);
     pVar->nLen = 0;
   }
...
-Subproject commit 8d7bf743852897110cbdcc7c4322cd7a74d4167b
+Subproject commit 050667e5b4d0eafa5387e4283e713559b421203f
+Subproject commit b62a26ecc164a310104df57691691b237e091c89
@@ -96,7 +96,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
   }
 
   public int getDriverMajorVersion() {
-    return 0;
+    return 2;
   }
 
   public int getDriverMinorVersion() {
...