Commit 215ba4bc authored by haoranc

Merge branch 'develop' of github.com:taosdata/TDengine into dev/chr

@@ -4,8 +4,10 @@ import jenkins.model.CauseOfInterruption
node {
}
def skipbuild = 0
def win_stop = 0
def scope = []
def mod = [0,1,2,3,4]
def abortPreviousBuilds() {
  def currentJobName = env.JOB_NAME
@@ -349,7 +351,7 @@ pipeline {
  }
  stages {
    stage('pre_build'){
      agent{label 'catalina'}
      options { skipDefaultCheckout() }
      when {
        changeRequest()
@@ -358,44 +360,32 @@ pipeline {
        script{
          abort_previous()
          abortPreviousBuilds()
          println env.CHANGE_BRANCH
          if(env.CHANGE_FORK){
            scope = ['connector','query','insert','other','tools','taosAdapter']
          }
          else{
            sh'''
            cd ${WKC}
            git fetch
            git checkout ${CHANGE_BRANCH}
            git pull
            '''
            dir('/var/lib/jenkins/workspace/TDinternal/community'){
              gitlog = sh(script: "git log -1 --pretty=%B ", returnStdout:true)
              println gitlog
              if (!(gitlog =~ /\((.*?)\)/)){
                autoCancelled = true
                error('Aborting the build.')
              }
              temp = (gitlog =~ /\((.*?)\)/)
              temp = temp[0].remove(1)
              scope = temp.split(",")
              Collections.shuffle mod
            }
          }
        }
      }
    }
    stage('Parallel test stage') {
@@ -408,239 +398,90 @@ pipeline {
        }
      }
      parallel {
        stage('python_1') {
          agent{label " slave1 || slave6 || slave11 || slave16 "}
          steps {
            pre_test()
            timeout(time: 55, unit: 'MINUTES'){
              script{
                scope.each {
                  sh """
                  date
                  cd ${WKC}/tests
                  ./test-CI.sh ${it} 5 ${mod[0]}
                  date"""
                }
              }
            }
          }
        }
        stage('python_2') {
          agent{label " slave2 || slave7 || slave12 || slave17 "}
          steps {
            pre_test()
            timeout(time: 55, unit: 'MINUTES'){
              script{
                scope.each {
                  sh """
                  date
                  cd ${WKC}/tests
                  ./test-CI.sh ${it} 5 ${mod[1]}
                  date"""
                }
              }
            }
          }
        }
        stage('python_3') {
          agent{label " slave3 || slave8 || slave13 || slave18 "}
          steps {
            timeout(time: 105, unit: 'MINUTES'){
              pre_test()
              script{
                scope.each {
                  sh """
                  date
                  cd ${WKC}/tests
                  ./test-CI.sh ${it} 5 ${mod[2]}
                  date"""
                }
              }
            }
          }
        }
        stage('python_4') {
          agent{label " slave4 || slave9 || slave14 || slave19 "}
          steps {
            timeout(time: 55, unit: 'MINUTES'){
              pre_test()
              script{
                scope.each {
                  sh """
                  date
                  cd ${WKC}/tests
                  ./test-CI.sh ${it} 5 ${mod[3]}
                  date"""
                }
              }
            }
          }
        }
        stage('python_5') {
          agent{label " slave5 || slave10 || slave15 || slave20 "}
          steps {
            timeout(time: 55, unit: 'MINUTES'){
              pre_test()
              script{
                scope.each {
                  sh """
                  date
                  cd ${WKC}/tests
                  ./test-CI.sh ${it} 5 ${mod[4]}
                  date"""
                }
              }
            }
          }
        }
@@ -813,3 +654,4 @@ pipeline {
      }
    }
  }
@@ -594,6 +594,65 @@ public void setString(int columnIndex, ArrayList<String> list, int size) throws SQLException
public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
```
### <a class="anchor" id="set-client-configuration"></a>Setting client parameters

Starting with TDengine 2.3.5.0, the JDBC driver supports setting TDengine client parameters on an application's first connection. In JDBC-JNI mode, the driver supports setting client parameters both through the jdbcUrl and through a Properties object.

Note:

* JDBC-RESTful does not support setting client parameters.
* Client parameters set in an application are process-level: to update them, the application must be restarted, because client parameters are global and only the application's first setting takes effect.
* The following sample code is based on taos-jdbcdriver-2.0.36.

Sample code:
```java
public class ClientParameterSetting {
private static final String host = "127.0.0.1";
public static void main(String[] args) throws SQLException {
setParameterInJdbcUrl();
setParameterInProperties();
}
private static void setParameterInJdbcUrl() throws SQLException {
String jdbcUrl = "jdbc:TAOS://" + host + ":6030/?debugFlag=135&asyncLog=0";
Connection connection = DriverManager.getConnection(jdbcUrl, "root", "taosdata");
printDatabase(connection);
connection.close();
}
private static void setParameterInProperties() throws SQLException {
String jdbcUrl = "jdbc:TAOS://" + host + ":6030/";
Properties properties = new Properties();
properties.setProperty("user", "root");
properties.setProperty("password", "taosdata");
properties.setProperty("debugFlag", "135");
properties.setProperty("asyncLog", "0");
properties.setProperty("maxSQLLength", "1048576");
try (Connection conn = DriverManager.getConnection(jdbcUrl, properties)) {
printDatabase(conn);
}
}
private static void printDatabase(Connection connection) throws SQLException {
try (Statement stmt = connection.createStatement()) {
ResultSet rs = stmt.executeQuery("show databases");
ResultSetMetaData meta = rs.getMetaData();
while (rs.next()) {
for (int i = 1; i <= meta.getColumnCount(); i++) {
System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t");
}
System.out.println();
}
}
}
}
```
## <a class="anchor" id="subscribe"></a>Subscribe

### Create
......
@@ -7,11 +7,21 @@ TDengine can be integrated with the open-source data visualization system [Grafana](https://www.grafana.com/)
### Install Grafana

TDengine currently supports Grafana 7.0 and above. Users can download the installation package for their operating system from the Grafana website and install it. Download address: <https://grafana.com/grafana/download>

### Configure Grafana

The TDengine plugin for Grafana is hosted on GitHub and can be downloaded from <https://github.com/taosdata/grafanaplugin/releases/latest>; the current latest version is 3.1.3.
Installing the plugin with the [`grafana-cli` command-line tool](https://grafana.com/docs/grafana/latest/administration/cli/) is recommended.
```bash
sudo -u grafana grafana-cli \
--pluginUrl https://github.com/taosdata/grafanaplugin/releases/download/v3.1.3/tdengine-datasource-3.1.3.zip \
plugins install tdengine-datasource
```
Alternatively, download the package locally and unzip it into the Grafana plugin directory.
```bash
GF_VERSION=3.1.3
@@ -31,11 +41,18 @@ Grafana 7.3+ / 8.x verify plugin signatures, so grafana.ini also needs
allow_loading_unsigned_plugins = tdengine-datasource
```
In a Docker environment, the following environment variables can be used to install and configure the TDengine plugin automatically:
```bash
GF_INSTALL_PLUGINS=https://github.com/taosdata/grafanaplugin/releases/download/v3.1.3/tdengine-datasource-3.1.3.zip;tdengine-datasource
GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=tdengine-datasource
```
### Use Grafana

#### Configure data source

Users can log in to the Grafana server at <http://localhost:3000> (username/password: admin/admin) and add a data source through `Configuration -> Data Sources` on the left, as shown below:

![img](../images/connections/add_datasource1.jpg)
......
@@ -45,7 +45,7 @@ arbitrator ha.taosdata.com:6042
The parameters that must be modified are firstEp and fqdn. firstEp must be configured identically on every data node, **but fqdn must be set to the value of the data node it resides on**. The other parameters need not be changed unless you are quite clear about why you are changing them.
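For illustration, a minimal sketch of these two settings in taos.cfg on one node (the hostnames here are hypothetical):

```
# identical on every dnode in the cluster
firstEp                   h1.taosdata.example.com:6030
# this node's own FQDN, different on every dnode
fqdn                      h2.taosdata.example.com
```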
**For a data node (dnode) to join a cluster, the 9 cluster-related parameters in the table below must be completely identical; otherwise the dnode cannot join the cluster.**

| **#** | **Parameter** | **Meaning** |
| ----- | ------------------ | ---------------------------------------- |
@@ -68,6 +68,8 @@ arbitrator ha.taosdata.com:6042
```
Welcome to the TDengine shell from Linux, Client Version:2.0.0.0
Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.

taos> show dnodes;
......
@@ -48,11 +48,12 @@ TDengine's default timestamp precision is milliseconds, but it can be changed with an option passed at CREATE DATABASE time
| 3 | BIGINT | 8 | Long integer, range [-2^63+1, 2^63-1]; -2^63 is used for NULL |
| 4 | FLOAT | 4 | Floating point, 6-7 significant digits, range [-3.4E38, 3.4E38] |
| 5 | DOUBLE | 8 | Double-precision floating point, 15-16 significant digits, range [-1.7E308, 1.7E308] |
| 6 | BINARY | Custom | Records single-byte strings; recommended only for printable ASCII characters, while multi-byte characters such as Chinese must use nchar. The theoretical maximum length is 16374 bytes. binary accepts only string input, and strings must be single-quoted. A size must be specified: binary(20) defines a string of at most 20 single-byte characters, each occupying 1 byte, for a fixed total of 20 bytes of storage; an error is reported if the user's string exceeds 20 bytes. A single quote inside the string can be written as the escape character backslash plus single quote, i.e. `\'`. |
| 7 | SMALLINT | 2 | Short integer, range [-32767, 32767]; -32768 is used for NULL |
| 8 | TINYINT | 1 | Single-byte integer, range [-127, 127]; -128 is used for NULL |
| 9 | BOOL | 1 | Boolean, {true, false} |
| 10 | NCHAR | Custom | Records strings that may contain multi-byte characters such as Chinese. Each nchar character occupies 4 bytes of storage. Strings are single-quoted, and single quotes inside a string are escaped as `\'`. A size must be specified: a column of type nchar(10) stores at most 10 nchar characters, occupying a fixed 40 bytes. An error is reported if the user's string exceeds the declared length. |
| 11 | JSON |  | JSON data type; only tags can be in JSON format |

<!-- REPLACE_OPEN_TO_ENTERPRISE__COLUMN_TYPE_ADDONS -->
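As a quick sketch of the sizing rules above (the table and column names are hypothetical), the binary(20) column below always reserves 20 bytes per row and the nchar(10) column 40 bytes:

```mysql
CREATE TABLE sensor (ts TIMESTAMP, val DOUBLE, ok BOOL, sn BINARY(20), note NCHAR(10));
```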
**Tips**:
@@ -682,6 +683,48 @@ taos> SELECT SERVER_STATUS() AS status;
Query OK, 1 row(s) in set (0.000081s)
```
Usage notes for the function _block_dist()

Syntax:

SELECT _block_dist() FROM { tb_name | stb_name }

Function: returns the data-block distribution of the specified table or super table.

Return type: string.

Applicable data types: takes no parameters.

Nested query support: subqueries and nested queries are not supported.

Notes:

Returns the distribution of the data blocks of the table or super table named in the FROM clause. Query conditions are not supported.

The result is a histogram of the number of rows contained in the data blocks of that table or super table.

The returned result looks like:

```
summary:
5th=[392], 10th=[392], 20th=[392], 30th=[392], 40th=[792], 50th=[792] 60th=[792], 70th=[792], 80th=[792], 90th=[792], 95th=[792], 99th=[792] Min=[392(Rows)] Max=[800(Rows)] Avg=[666(Rows)] Stddev=[2.17] Rows=[2000], Blocks=[3], Size=[5.440(Kb)] Comp=[0.23] RowsInMem=[0] SeekHeaderTime=[1(us)]
```

The fields above mean the following:

1. A histogram of the number of rows per data block stored in files for the queried (super) table, at the 5%, 10%, 20%, 30%, 40%, 50%, 60%, 70%, 80%, 90%, 95% and 99% percentiles.
2. The smallest number of rows in any data block: the Min field, 392 rows.
3. The largest number of rows in any data block: the Max field, 800 rows.
4. The arithmetic mean of rows per data block: 666 rows (the Avg field).
5. The standard deviation of rows per data block: 2.17 (Stddev).
6. The total number of rows: 2000 (Rows).
7. The total number of data blocks: 3 (Blocks).
8. The disk space occupied by the data blocks: 5.44 Kb (Size).
9. The compression ratio, i.e. the compressed block size divided by the original data size: 23% (Comp), meaning the compressed data is 23% of the original size.
10. The number of rows held in memory: 0, meaning no rows are cached in memory (RowsInMem).
11. The time spent reading the header file while collecting block information: 1 microsecond (SeekHeaderTime).

Supported versions: this feature is available starting with version 2.1.0.x; versions before 2.1.0.0 do not support it.
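For example, a minimal usage sketch, assuming a super table named `meters` already exists:

```mysql
SELECT _block_dist() FROM meters;
```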
#### Special keywords in TAOS SQL

> TBNAME: in a super table query, TBNAME can be treated as a special tag that holds the name of each sub-table involved in the query<br>
@@ -1603,6 +1646,15 @@ TAOS SQL supports GROUP BY on tags and TBNAME, and also on ordinary columns
IS NOT NULL supports columns of all types. The non-empty expression is <>"" and applies only to columns of non-numeric types.
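For instance, a sketch of this non-empty test on a hypothetical nchar column `location`:

```mysql
SELECT * FROM meters WHERE location <> "";
```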
**Restrictions on ORDER BY** (see the sketches after this list)

- A regular table can have only one ORDER BY expression.
- A super table can have at most two ORDER BY expressions, and the second one must be ts.
- ORDER BY on a tag must be combined with GROUP BY on the same tag; tbname follows the same logic as a tag. Applies only to super tables.
- ORDER BY on an ordinary column must be combined with GROUP BY or TOP/BOTTOM on the same ordinary column. Applies to super tables and regular tables. If both GROUP BY and TOP/BOTTOM are present, ORDER BY must use the GROUP BY column.
- ORDER BY ts applies to super tables and regular tables.
- When ORDER BY ts is combined with GROUP BY, rows are ordered by ts within each group.
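A few sketches of queries these rules permit, assuming a hypothetical super table `meters` with a tag `location` and an ordinary column `current`:

```mysql
SELECT AVG(current) FROM meters GROUP BY location ORDER BY location;
SELECT TOP(current, 5) FROM meters ORDER BY current DESC;
SELECT * FROM meters ORDER BY ts DESC;
```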
## Validity rules for table (column) names

TDengine table and column names follow these rules:
A name may contain only letters, digits and underscores, may not begin with a digit, may be at most 192 bytes long, and is case-insensitive.
@@ -1618,3 +1670,87 @@ TDengine table (column) naming rules:
Supported versions
Escape characters are supported starting with version 2.3.0.1.
## JSON type usage notes

- Syntax

  1. Create a JSON-typed tag

  ```mysql
  create stable s1 (ts timestamp, v1 int) tags (info json)
  create table s1_1 using s1 tags ('{"k1": "v1"}')
  ```

  2. JSON value operator ->

  ```mysql
  select * from s1 where info->'k1' = 'v1'
  select info->'k1' from s1
  ```

  3. JSON key existence operator contains

  ```mysql
  select * from s1 where info contains 'k2'
  select * from s1 where info contains 'k1'
  ```

- Supported operations

  1. In a where condition, match/nmatch/between and/like/and/or/is null/is not null are supported; in is not supported.

  ```mysql
  select * from s1 where info->'k1' match 'v*';
  select * from s1 where info->'k1' like 'v%' and info contains 'k2';
  select * from s1 where info is null;
  select * from s1 where info->'k1' is not null
  ```

  2. A JSON tag may appear in group by, order by, join clauses, union all and subqueries, e.g. group by json->'key'.

  3. distinct is supported.

  ```mysql
  select distinct info->'k1' from s1
  ```

  4. Tag operations

     Changing the value of a JSON tag (full overwrite) is supported.
     Changing the name of a JSON tag is supported.
     Adding a JSON tag, deleting a JSON tag, and changing the column width of a JSON tag are not supported.

- Other constraints

  1. Only tag columns may use the JSON type, and if a JSON tag is used it must be the only tag column.

  2. Length limits: a key in the JSON may not exceed 256 characters and must consist of printable ASCII characters; the total JSON string may not exceed 4096 bytes.

  3. JSON format restrictions:

     1. The JSON input string may be empty ("", "\t", " " or null) or an object; it may not be a non-empty plain string, a boolean or an array.
     2. The object may be {}, in which case the whole JSON string is treated as empty. A key may be "", in which case that k-v pair is ignored.
     3. A value may be a number (int/double), a string, a bool or null; arrays are not yet allowed. Nesting is not allowed.
     4. If the same key appears twice in the JSON string, the first occurrence takes effect.
     5. Escape sequences are not yet supported inside the JSON string.

  4. Querying a key that does not exist in the JSON returns NULL.

  5. When a JSON tag is produced by a subquery, the upper-level query can no longer parse and query the JSON string from the subquery result.

     For example, the following are not supported:

  ```mysql
  select jtag->'key' from (select jtag from stable)
  select jtag->'key' from (select jtag from stable) where jtag->'key'>0
  ```
@@ -33,8 +33,8 @@ IT operations monitoring data is usually highly time-sensitive, for example
### Download the TDengine plugin to the Grafana plugin directory

```bash
1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.3/tdengine-datasource-3.1.3.zip
2. sudo unzip tdengine-datasource-3.1.3.zip -d /var/lib/grafana/plugins/
3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
5. sudo systemctl restart grafana-server.service
......
@@ -32,8 +32,8 @@ IT operations monitoring data is usually highly time-sensitive, for example
### Copy the TDengine plugin to the Grafana plugin directory

```bash
1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.3/tdengine-datasource-3.1.3.zip
2. sudo unzip tdengine-datasource-3.1.3.zip -d /var/lib/grafana/plugins/
3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
5. sudo systemctl restart grafana-server.service
......
@@ -575,8 +575,67 @@ public void setShort(int columnIndex, ArrayList<Short> list) throws SQLException
public void setString(int columnIndex, ArrayList<String> list, int size) throws SQLException
public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
```
### Set client configuration in JDBC

Starting with TDengine 2.3.5.0, the JDBC driver supports setting TDengine client parameters on the first connection of a Java application. In JDBC-JNI mode, the driver supports setting client parameters through both the jdbcUrl and a Properties object.

Note:

* JDBC-RESTful does not support setting client parameters.
* Client parameters set in a Java application are process-level: to update them, the application must be restarted, because these parameters are global and only the application's first setting takes effect.
* The following sample code is based on taos-jdbcdriver-2.0.36.
Sample Code:
```java
public class ClientParameterSetting {
private static final String host = "127.0.0.1";
public static void main(String[] args) throws SQLException {
setParameterInJdbcUrl();
setParameterInProperties();
}
private static void setParameterInJdbcUrl() throws SQLException {
String jdbcUrl = "jdbc:TAOS://" + host + ":6030/?debugFlag=135&asyncLog=0";
Connection connection = DriverManager.getConnection(jdbcUrl, "root", "taosdata");
printDatabase(connection);
connection.close();
}
private static void setParameterInProperties() throws SQLException {
String jdbcUrl = "jdbc:TAOS://" + host + ":6030/";
Properties properties = new Properties();
properties.setProperty("user", "root");
properties.setProperty("password", "taosdata");
properties.setProperty("debugFlag", "135");
properties.setProperty("asyncLog", "0");
properties.setProperty("maxSQLLength", "1048576");
try (Connection conn = DriverManager.getConnection(jdbcUrl, properties)) {
printDatabase(conn);
}
}
private static void printDatabase(Connection connection) throws SQLException {
try (Statement stmt = connection.createStatement()) {
ResultSet rs = stmt.executeQuery("show databases");
ResultSetMetaData meta = rs.getMetaData();
while (rs.next()) {
for (int i = 1; i <= meta.getColumnCount(); i++) {
System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t");
}
System.out.println();
}
}
}
}
```
## Data Subscription

#### Subscribe
......
@@ -6,25 +6,47 @@ TDengine can be quickly integrated with [Grafana](https://www.grafana.com/), an
### Install Grafana

TDengine currently supports Grafana 7.0 and above. You can download and install the package from the Grafana website for your operating system. Download address: <https://grafana.com/grafana/download>.

### Configure Grafana

The TDengine data source plugin for Grafana is hosted on GitHub; refer to the latest release page <https://github.com/taosdata/grafanaplugin/releases/latest> to download the latest plugin package. The current version is 3.1.3.
It is recommended to use the [`grafana-cli` command-line tool](https://grafana.com/docs/grafana/latest/administration/cli/) to install the plugin.
```bash
sudo -u grafana grafana-cli \
--pluginUrl https://github.com/taosdata/grafanaplugin/releases/download/v3.1.3/tdengine-datasource-3.1.3.zip \
plugins install tdengine-datasource
```
Alternatively, you can manually download the plugin package and unpack it into the Grafana plugins directory.
```bash
GF_VERSION=3.1.3
wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
```

Taking CentOS 7.2 as an example, just unpack the package into the /var/lib/grafana/plugins directory and restart Grafana.

```bash
sudo unzip tdengine-datasource-$GF_VERSION.zip -d /var/lib/grafana/plugins/
```
Grafana 7.3+ and 8.x verify plugin signatures for security, so an additional setting in the `grafana.ini` file is required to allow unsigned plugins such as the TDengine data source.
```ini
[plugins]
allow_loading_unsigned_plugins = tdengine-datasource
```
In Docker, Compose or Kubernetes environments, setting the following two environment variables installs and enables the plugin automatically.
```bash
GF_INSTALL_PLUGINS=https://github.com/taosdata/grafanaplugin/releases/download/v3.1.3/tdengine-datasource-3.1.3.zip;tdengine-datasource
GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=tdengine-datasource
```
### Use Grafana

#### Configure data source
......
@@ -53,7 +53,7 @@ In TDengine, the following 10 data types can be used in the data model of an ordinary table
| 8 | TINYINT | 1 | A nullable integer type with a range of [-127, 127] |
| 9 | BOOL | 1 | Boolean type, {true, false} |
| 10 | NCHAR | Custom | Used to record non-ASCII strings, such as Chinese characters. Each nchar character takes up 4 bytes of storage space. Single quotation marks are used at both ends of the string, and single quotation marks inside the string need the escape character `\'`. When nchar is used, the string size must be specified: a column of type nchar(10) stores up to 10 nchar characters, which take up a fixed 40 bytes of space. If the length of the user string exceeds the declared length, an error will be reported. |
| 11 | JSON |  | JSON type; only supported for tags |
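As a quick sketch of the fixed-size semantics above (hypothetical names), the nchar(10) column below always reserves 40 bytes per row:

```mysql
CREATE TABLE sensor_en (ts TIMESTAMP, note NCHAR(10));
```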
**Tips**:
@@ -1245,3 +1245,92 @@ TAOS SQL supports joining columns of two tables by primary key timestamp between them
**Availability of IS NOT NULL**

IS NOT NULL supports all types of columns. The non-null expression is <>"" and only applies to columns of non-numeric types.
**Restrictions on ORDER BY** (see the sketches after this list)

- A non-super table can have only one ORDER BY expression.
- A super table can have at most two ORDER BY expressions, and the second must be ts.
- ORDER BY on a tag must use the same tag as GROUP BY; TBNAME follows the same logic as a tag.
- ORDER BY on an ordinary column must use the same ordinary column as GROUP BY or TOP/BOTTOM. If both GROUP BY and TOP/BOTTOM are present, ORDER BY must use the GROUP BY column.
- When both ORDER BY and GROUP BY are present, rows within each group are sorted by ts.
- ORDER BY ts is always allowed.
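A few sketches of queries these rules permit, assuming a hypothetical super table `meters` with a tag `location` and an ordinary column `current`:

```mysql
SELECT AVG(current) FROM meters GROUP BY location ORDER BY location;
SELECT TOP(current, 5) FROM meters ORDER BY current DESC;
SELECT * FROM meters ORDER BY ts DESC;
```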
## JSON type instructions

- Syntax description

  1. Create a JSON type tag

  ```mysql
  create stable s1 (ts timestamp, v1 int) tags (info json)
  create table s1_1 using s1 tags ('{"k1": "v1"}')
  ```

  2. JSON value operator (->)

  ```mysql
  select * from s1 where info->'k1' = 'v1'
  select info->'k1' from s1
  ```

  3. JSON key existence operator (contains)

  ```mysql
  select * from s1 where info contains 'k2'
  select * from s1 where info contains 'k1'
  ```

- Supported operations

  1. In a where condition, match/nmatch/between and/like/and/or/is null/is not null are supported; the in operator is not supported.

  ```mysql
  select * from s1 where info->'k1' match 'v*';
  select * from s1 where info->'k1' like 'v%' and info contains 'k2';
  select * from s1 where info is null;
  select * from s1 where info->'k1' is not null
  ```

  2. A JSON tag is supported in group by, order by, join clauses, union all and subqueries, e.g. group by json->'key'.

  3. The distinct operator is supported.

  ```mysql
  select distinct info->'k1' from s1
  ```

  4. Tag operations

     Changing the value of a JSON tag (full overwrite) is supported.
     Changing the name of a JSON tag is supported.
     Adding or deleting a JSON tag is not supported.

- Other constraints

  1. Only tag columns can use the JSON type. If a JSON tag is used, there can be only one tag column.

  2. Length limits: the length of a key in the JSON cannot exceed 256, and keys must be printable ASCII characters; the total length of the JSON string cannot exceed 4096 bytes.

  3. JSON format restrictions:

     1. The JSON input string can be empty ("", " ", "\t" or null) or an object; it cannot be a non-empty plain string, a boolean or an array.
     2. The object can be {}; if so, the whole JSON string is treated as empty. A key can be ""; if so, that K-V pair is ignored in the JSON string.
     3. A value can be a number (int/double), a string, a bool or null; it cannot be an array. Nesting is not allowed.
     4. If two identical keys appear in the JSON string, the first one takes effect.
     5. Escape sequences are not supported in the JSON string.

  4. NULL is returned when querying a key that does not exist in the JSON.

  5. When a JSON tag is part of a subquery result, the upper-level query can no longer parse and query the JSON string inside the subquery.

     The following queries are not supported:

  ```mysql
  select jtag->'key' from (select jtag from stable)
  select jtag->'key' from (select jtag from stable) where jtag->'key'>0
  ```
@@ -33,8 +33,8 @@ Please download TDengine 2.3.0.0 or the above version from TAOS Data's [official
### Download TDengine plugin to Grafana plugin's directory

```bash
1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.3/tdengine-datasource-3.1.3.zip
2. sudo unzip tdengine-datasource-3.1.3.zip -d /var/lib/grafana/plugins/
3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
5. sudo systemctl restart grafana-server.service
......
@@ -32,8 +32,8 @@ Please download TDengine 2.3.0.0 or the above version from TAOS Data's [official
### Download TDengine plugin to Grafana plugin's directory

```bash
1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.3/tdengine-datasource-3.1.3.zip
2. sudo unzip tdengine-datasource-3.1.3.zip -d /var/lib/grafana/plugins/
3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
5. sudo systemctl restart grafana-server.service
......
@@ -494,11 +494,13 @@
else
  exit 1
fi

CORES=`grep -c ^processor /proc/cpuinfo`

if [[ "$allocator" == "jemalloc" ]]; then
    # jemalloc need compile first, so disable parallel build
    make -j ${CORES} && ${csudo}make install
else
    make -j ${CORES} && ${csudo}make install
fi

cd ${curr_dir}
......
@@ -43,9 +43,11 @@ if [ "$osType" != "Darwin" ]; then
  if [ "$pagMode" == "lite" ]; then
    #strip ${build_dir}/bin/taosd
    strip ${build_dir}/bin/taos
    bin_files="${build_dir}/bin/taos \
        ${script_dir}/remove_client.sh"
  else
    bin_files="${build_dir}/bin/taos \
        ${script_dir}/remove_client.sh \
        ${script_dir}/set_core.sh \
        ${script_dir}/get_client.sh"
    #${script_dir}/get_client.sh ${script_dir}/taosd-dump-cfg.gdb"
......
@@ -9,3 +9,8 @@
INCLUDE_DIRECTORIES(inc)
AUX_SOURCE_DIRECTORY(src SRC)
ADD_LIBRARY(balance ${SRC})

IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
    ADD_DEPENDENCIES(balance jemalloc)
ENDIF ()
@@ -128,12 +128,13 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
      // type length
      int32_t bytes = pSchema[i].bytes;
      pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 2);

      if (pSchema[i].type == TSDB_DATA_TYPE_BINARY){
        bytes -= VARSTR_HEADER_SIZE;
      }
      else if(pSchema[i].type == TSDB_DATA_TYPE_NCHAR || pSchema[i].type == TSDB_DATA_TYPE_JSON) {
        bytes -= VARSTR_HEADER_SIZE;
        bytes = bytes / TSDB_NCHAR_SIZE;
      }

      *(int32_t *)(pRes->data + tscFieldInfoGetOffset(pQueryInfo, 2) * totalNumOfRows + pField->bytes * i) = bytes;
......
@@ -996,9 +996,16 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
    return code;
  }

  // set the command/global limit parameters from the first not empty subclause to the sqlcmd object
  SQueryInfo* queryInfo = pCmd->pQueryInfo;
  int16_t command = queryInfo->command;
  while (command == TSDB_SQL_RETRIEVE_EMPTY_RESULT && queryInfo->sibling != NULL) {
    queryInfo = queryInfo->sibling;
    command = queryInfo->command;
  }
  pCmd->active = queryInfo;
  pCmd->command = command;

  STableMetaInfo* pTableMetaInfo1 = tscGetMetaInfo(pCmd->active, 0);
  if (pTableMetaInfo1->pTableMeta != NULL) {
@@ -4512,13 +4519,16 @@ static int32_t validateSQLExprItemSQLFunc(SSqlCmd* pCmd, tSqlExpr* pExpr,
      if (TSDB_FUNC_IS_SCALAR(functionId)) {
        code = validateSQLExprItem(pCmd, pParamElem->pNode, pQueryInfo, pList, childrenTypes + i, uid, childrenHeight+i);
        if (code != TSDB_CODE_SUCCESS) {
          tfree(childrenTypes);
          tfree(childrenHeight);
          return code;
        }
      }

      if (!TSDB_FUNC_IS_SCALAR(functionId) &&
          (pParamElem->pNode->type == SQL_NODE_EXPR || pParamElem->pNode->type == SQL_NODE_SQLFUNCTION)) {
        tfree(childrenTypes);
        tfree(childrenHeight);
        return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
      }
@@ -4540,6 +4550,8 @@ static int32_t validateSQLExprItemSQLFunc(SSqlCmd* pCmd, tSqlExpr* pExpr,
      *height = maxChildrenHeight + 1;

      if (anyChildAgg && anyChildScalar) {
        tfree(childrenTypes);
        tfree(childrenHeight);
        return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
      }
      if (anyChildAgg) {
@@ -4551,7 +4563,8 @@ static int32_t validateSQLExprItemSQLFunc(SSqlCmd* pCmd, tSqlExpr* pExpr,
        *type = SQLEXPR_TYPE_AGG;
      }
    }
    tfree(childrenTypes);
    tfree(childrenHeight);
    //end if param list is not null
  } else {
    if (TSDB_FUNC_IS_SCALAR(functionId)) {
@@ -6312,7 +6325,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
  const char* msg0 = "only one column allowed in orderby";
  const char* msg1 = "invalid column name in orderby clause";
  const char* msg2 = "too many order by columns";
  const char* msg3 = "only primary timestamp, tag/tbname in groupby clause allowed as order column";
  const char* msg4 = "only tag in groupby clause allowed in order clause";
  const char* msg5 = "only primary timestamp/column in top/bottom function allowed as order column";
  const char* msg6 = "only primary timestamp allowed as the second order column";
@@ -6334,8 +6347,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
  SArray* pSortOrder = pSqlNode->pSortOrder;

  /*
   * for table query, there is only one or none order option is allowed
   *
   * for super table query, the order option must be less than 3 and the second must be ts.
   *
@@ -6410,7 +6422,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
        return invalidOperationMsg(pMsgBuf, msg4);
      }

      SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0);
      if (relTagIndex == pColIndex->colIndex && pColIndex->flag == TSDB_COL_TAG) {
        if (tscGetColumnSchemaById(pTableMetaInfo->pTableMeta, pColIndex->colId)->type == TSDB_DATA_TYPE_JSON){
          if(!pItem->isJsonExp){
            return invalidOperationMsg(pMsgBuf, msg14);
@@ -6863,7 +6875,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
          tscError("json type error, should be string");
          return invalidOperationMsg(pMsg, msg25);
        }
        if (pItem->pVar.nLen > TSDB_MAX_JSON_TAGS_LEN / TSDB_NCHAR_SIZE) {
          tscError("json tag too long");
          return invalidOperationMsg(pMsg, msg14);
        }
......
@@ -774,11 +774,12 @@ static void setResRawPtrImpl(SSqlRes* pRes, SInternalField* pInfo, int32_t i, bo
        memcpy(dst, p, varDataTLen(p));
      } else if (varDataLen(p) > 0) {
        int32_t length = taosUcs4ToMbs(varDataVal(p), varDataLen(p), varDataVal(dst));
        if (length <= 0) {
          tscError("charset:%s to %s. val:%s convert failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, (char*)p);
        }
        if (length >= 0){
          varDataSetLen(dst, length);
        }
      } else {
        varDataSetLen(dst, 0);
      }
@@ -809,18 +810,23 @@ static void setResRawPtrImpl(SSqlRes* pRes, SInternalField* pInfo, int32_t i, bo
        varDataSetLen(dst, strlen(varDataVal(dst)));
      }else if (type == TSDB_DATA_TYPE_JSON) {
        int32_t length = taosUcs4ToMbs(varDataVal(realData), varDataLen(realData), varDataVal(dst));
        if (length <= 0) {
          tscError("charset:%s to %s. val:%s convert failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, (char*)p);
        }
        if (length >= 0){
          varDataSetLen(dst, length);
        }
      }else if (type == TSDB_DATA_TYPE_NCHAR) {   // value -> "value"
        *(char*)varDataVal(dst) = '\"';
        int32_t length = taosUcs4ToMbs(varDataVal(realData), varDataLen(realData), POINTER_SHIFT(varDataVal(dst), CHAR_BYTES));
        if (length <= 0) {
          tscError("charset:%s to %s. val:%s convert failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, (char*)p);
        }
        if (length >= 0){
          varDataSetLen(dst, length + CHAR_BYTES*2);
          *(char*)(POINTER_SHIFT(varDataVal(dst), length + CHAR_BYTES)) = '\"';
        }
      }else if (type == TSDB_DATA_TYPE_DOUBLE) {
        double jsonVd = *(double*)(realData);
        sprintf(varDataVal(dst), "%.9lf", jsonVd);
@@ -5186,7 +5192,8 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt
  }

  // global aggregate query
  if (pQueryAttr->stableQuery && (pQueryAttr->simpleAgg || pQueryAttr->interval.interval > 0 || pQueryAttr->sw.gap > 0)
      && tscIsTwoStageSTableQuery(pQueryInfo, 0)) {
    createGlobalAggregateExpr(pQueryAttr, pQueryInfo);
  }
@@ -5514,10 +5521,10 @@ int parseJsontoTagData(char* json, SKVRowBuilder* kvRowBuilder, char* errMsg, in
    char *tagVal = calloc(strlen(jsonValue) * TSDB_NCHAR_SIZE + TSDB_NCHAR_SIZE, 1);
    *tagVal = jsonType2DbType(0, item->type);   // type
    char* tagData = POINTER_SHIFT(tagVal,CHAR_BYTES);
    if (strlen(jsonValue) > 0 && !taosMbsToUcs4(jsonValue, strlen(jsonValue), varDataVal(tagData),
                      (int32_t)(strlen(jsonValue) * TSDB_NCHAR_SIZE), &outLen)) {
      tscError("charset:%s to %s. val:%s, errno:%s, convert failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, jsonValue, strerror(errno));
      retCode = tscSQLSyntaxErrMsg(errMsg, "charset convert json error", NULL);
      free(tagVal);
      goto end;
    }
......
@@ -27,6 +27,7 @@
#include "tskiplist.h"
#include "texpr.h"
#include "tarithoperator.h"
#include "tulog.h"

static int32_t exprValidateMathNode(tExprNode *pExpr);
static int32_t exprValidateStringConcatNode(tExprNode *pExpr);
@@ -1274,6 +1275,11 @@ void castConvert(int16_t inputType, int16_t inputBytes, char *input, int16_t Out
      } else if (inputType == TSDB_DATA_TYPE_NCHAR) {
        char *newColData = calloc(1, outputBytes * TSDB_NCHAR_SIZE + 1);
        int len = taosUcs4ToMbs(varDataVal(input), varDataLen(input), newColData);
        if (len < 0){
          uError("castConvert taosUcs4ToMbs error 1");
          tfree(newColData);
          return;
        }
        newColData[len] = 0;
        *(int64_t *)output = strtoll(newColData, NULL, 10);
        tfree(newColData);
@@ -1291,6 +1297,11 @@ void castConvert(int16_t inputType, int16_t inputBytes, char *input, int16_t Out
      } else if (inputType == TSDB_DATA_TYPE_NCHAR) {
        char *newColData = calloc(1, outputBytes * TSDB_NCHAR_SIZE + 1);
        int len = taosUcs4ToMbs(varDataVal(input), varDataLen(input), newColData);
        if (len < 0){
          uError("castConvert taosUcs4ToMbs error 2");
          tfree(newColData);
          return;
        }
        newColData[len] = 0;
        *(int64_t *)output = strtoull(newColData, NULL, 10);
        tfree(newColData);
@@ -1332,11 +1343,19 @@ void castConvert(int16_t inputType, int16_t inputBytes, char *input, int16_t Out
      if (inputType == TSDB_DATA_TYPE_BOOL) {
        char tmp[8] = {0};
        int32_t len = sprintf(tmp, "%.*s", ncharSize, *(int8_t*)input ? "true" : "false");
        bool ret = taosMbsToUcs4(tmp, len, varDataVal(output), outputBytes - VARSTR_HEADER_SIZE, &len);
        if(!ret) {
          uError("castConvert1 taosMbsToUcs4 error");
          return;
        }
        varDataSetLen(output, len);
      } else if (inputType == TSDB_DATA_TYPE_BINARY) {
        int32_t len = ncharSize > varDataLen(input) ? varDataLen(input) : ncharSize;
        bool ret = taosMbsToUcs4(input + VARSTR_HEADER_SIZE, len, varDataVal(output), outputBytes - VARSTR_HEADER_SIZE, &len);
        if(!ret) {
          uError("castConvert2 taosMbsToUcs4 error");
          return;
        }
        varDataSetLen(output, len);
      } else if (inputType == TSDB_DATA_TYPE_TIMESTAMP) {
        assert(0);
@@ -1348,7 +1367,11 @@ void castConvert(int16_t inputType, int16_t inputBytes, char *input, int16_t Out
        char tmp[400] = {0};
        NUM_TO_STRING(inputType, input, sizeof(tmp), tmp);
        int32_t len = (int32_t)(ncharSize > strlen(tmp) ? strlen(tmp) : ncharSize);
        bool ret = taosMbsToUcs4(tmp, len, varDataVal(output), outputBytes - VARSTR_HEADER_SIZE, &len);
        if(!ret) {
          uError("castConvert3 taosMbsToUcs4 error");
          return;
        }
        varDataSetLen(output, len);
      }
      break;
......
@@ -23,6 +23,7 @@
#include "ttype.h"
#include "tutil.h"
#include "tvariant.h"
#include "tulog.h"

#define SET_EXT_INFO(converted, res, minv, maxv, exti) do { \
  if (converted == NULL || exti == NULL || *converted == false) { break; } \
@@ -359,8 +360,12 @@ int32_t tVariantToString(tVariant *pVar, char *dst) {
    case TSDB_DATA_TYPE_NCHAR: {
      dst[0] = '\'';
      int32_t len = taosUcs4ToMbs(pVar->wpz, (twcslen(pVar->wpz) + 1) * TSDB_NCHAR_SIZE, dst + 1);
      if (len < 0){
        uError("castConvert1 taosUcs4ToMbs error");
        return 0 ;
      }
      len = (int32_t)strlen(dst);
      dst[len] = '\'';
      dst[len + 1] = 0;
      return len + 1;
@@ -428,11 +433,17 @@ static int32_t toBinary(tVariant *pVariant, char **pDest, int32_t *pDestSize) {
      pBuf = realloc(pBuf, newSize + 1);
    }

    int32_t len = taosUcs4ToMbs(pVariant->wpz, (int32_t)newSize, pBuf);
    if (len < 0){
      uError("castConvert1 taosUcs4ToMbs error");
    }
    free(pVariant->wpz);
    pBuf[newSize] = 0;
  } else {
    int32_t len = taosUcs4ToMbs(pVariant->wpz, (int32_t)newSize, *pDest);
    if (len < 0){
      uError("castConvert1 taosUcs4ToMbs error");
    }
  }
} else {
......
@@ -398,5 +398,8 @@ namespace TDengineDriver
            IntPtr stmtErrPrt = StmtErrPtr(stmt);
            return Marshal.PtrToStringAnsi(stmtErrPrt);
        }

        [DllImport("taos", EntryPoint = "taos_fetch_lengths", CallingConvention = CallingConvention.Cdecl)]
        static extern public IntPtr FetchLengths(IntPtr taos);
    }
}
@@ -9,4 +9,8 @@
    <TargetFramework>net5.0</TargetFramework>
  </PropertyGroup>

  <PropertyGroup>
    <GenerateDocumentationFile>true</GenerateDocumentationFile>
    <DocumentationFile>..\doc\FunctionTest.XML</DocumentationFile>
  </PropertyGroup>
</Project>
using System;
using Test.UtilsTools;
using System.Collections.Generic;
namespace Cases
{
public class FetchLengthCase
{
/// <author>xiaolei</author>
/// <Name>TestRetrieveBinary</Name>
/// <describe>TD-12103 C# connector fetch_row with binary data retrieving error</describe>
/// <filename>FetchLength.cs</filename>
/// <result>pass or failed </result>
public void TestRetrieveBinary(IntPtr conn)
{
string sql1 = "create stable stb1 (ts timestamp, name binary(10)) tags(n int);";
string sql2 = "insert into tb1 using stb1 tags(1) values(now, 'log');";
string sql3 = "insert into tb2 using stb1 tags(2) values(now, 'test');";
string sql4 = "insert into tb3 using stb1 tags(3) values(now, 'db02');";
string sql5 = "insert into tb4 using stb1 tags(4) values(now, 'db3');";
string sql6 = "select distinct(name) from stb1;";
UtilsTools.ExecuteQuery(conn, sql1);
UtilsTools.ExecuteQuery(conn, sql2);
UtilsTools.ExecuteQuery(conn, sql3);
UtilsTools.ExecuteQuery(conn, sql4);
UtilsTools.ExecuteQuery(conn, sql5);
IntPtr resPtr = IntPtr.Zero;
resPtr = UtilsTools.ExecuteQuery(conn, sql6);
List<List<string>> result = UtilsTools.GetResultSet(resPtr);
List<string> colname = result[0];
List<string> data = result[1];
UtilsTools.AssertEqual("db3", data[0]);
UtilsTools.AssertEqual("log", data[1]);
UtilsTools.AssertEqual("db02", data[2]);
UtilsTools.AssertEqual("test", data[3]);
}
}
}
using System;
using Test.UtilsTools;
using Cases;

namespace Cases.EntryPoint
{
    class Program
    {
        static void Main(string[] args)
        {
            IntPtr conn = IntPtr.Zero;
            IntPtr stmt = IntPtr.Zero;
            IntPtr res = IntPtr.Zero;

            conn = UtilsTools.TDConnection("127.0.0.1", "root", "taosdata", "", 0);

            UtilsTools.ExecuteQuery(conn, "drop database if exists csharp");
            UtilsTools.ExecuteQuery(conn, "create database if not exists csharp keep 3650");
            UtilsTools.ExecuteQuery(conn, "use csharp");

            Console.WriteLine("====================StableColumnByColumn===================");
            StableColumnByColumn columnByColumn = new StableColumnByColumn();
            columnByColumn.Test(conn, "stablecolumnbycolumn");
            Console.WriteLine("====================StmtStableQuery===================");
            StmtStableQuery stmtStableQuery = new StmtStableQuery();
            stmtStableQuery.Test(conn, "stablecolumnbycolumn");

            Console.WriteLine("====================StableMutipleLine===================");
            StableMutipleLine mutipleLine = new StableMutipleLine();
            mutipleLine.Test(conn, "stablemutipleline");

            //================================================================================
            Console.WriteLine("====================NtableSingleLine===================");
            NtableSingleLine ntableSingleLine = new NtableSingleLine();
            ntableSingleLine.Test(conn, "stablesingleline");

            Console.WriteLine("====================NtableMutipleLine===================");
            NtableMutipleLine ntableMutipleLine = new NtableMutipleLine();
            ntableMutipleLine.Test(conn, "ntablemutipleline");
            Console.WriteLine("====================StmtNtableQuery===================");
            StmtNtableQuery stmtNtableQuery = new StmtNtableQuery();
            stmtNtableQuery.Test(conn, "ntablemutipleline");

            Console.WriteLine("====================NtableColumnByColumn===================");
            NtableColumnByColumn ntableColumnByColumn = new NtableColumnByColumn();
            ntableColumnByColumn.Test(conn, "ntablecolumnbycolumn");

            Console.WriteLine("====================fetchfeilds===================");
            FetchFields fetchFields = new FetchFields();
            fetchFields.Test(conn, "fetchfeilds");

            Console.WriteLine("===================JsonTagTest====================");
            JsonTagTest jsonTagTest = new JsonTagTest();
            jsonTagTest.Test(conn);

            Console.WriteLine("====================fetchLengthCase===================");
UtilsTools.CloseConnection(conn); FetchLengthCase fetchLengthCase = new FetchLengthCase();
UtilsTools.ExitProgram(); fetchLengthCase.TestRetrieveBinary(conn);
UtilsTools.ExecuteQuery(conn, "drop database if exists csharp");
} UtilsTools.CloseConnection(conn);
} UtilsTools.ExitProgram();
}
}
}
}
...@@ -35,7 +35,6 @@ namespace Test.UtilsTools ...@@ -35,7 +35,6 @@ namespace Test.UtilsTools
else else
{ {
Console.WriteLine(sql.ToString() + " success"); Console.WriteLine(sql.ToString() + " success");
} }
return res; return res;
} }
...@@ -83,9 +82,13 @@ namespace Test.UtilsTools ...@@ -83,9 +82,13 @@ namespace Test.UtilsTools
IntPtr rowdata; IntPtr rowdata;
StringBuilder builder = new StringBuilder(); StringBuilder builder = new StringBuilder();
while ((rowdata = TDengine.FetchRows(res)) != IntPtr.Zero) while ((rowdata = TDengine.FetchRows(res)) != IntPtr.Zero)
{ {
queryRows++; queryRows++;
IntPtr colLengthPtr = TDengine.FetchLengths(res);
int[] colLengthArr = new int[fieldCount];
Marshal.Copy(colLengthPtr, colLengthArr, 0, fieldCount);
for (int fields = 0; fields < fieldCount; ++fields) for (int fields = 0; fields < fieldCount; ++fields)
{ {
TDengineMeta meta = metas[fields]; TDengineMeta meta = metas[fields];
...@@ -131,7 +134,7 @@ namespace Test.UtilsTools ...@@ -131,7 +134,7 @@ namespace Test.UtilsTools
builder.Append(v7); builder.Append(v7);
break; break;
case TDengineDataType.TSDB_DATA_TYPE_BINARY: case TDengineDataType.TSDB_DATA_TYPE_BINARY:
string v8 = Marshal.PtrToStringAnsi(data); string v8 = Marshal.PtrToStringAnsi(data, colLengthArr[fields]);
builder.Append(v8); builder.Append(v8);
break; break;
case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP:
...@@ -139,7 +142,7 @@ namespace Test.UtilsTools ...@@ -139,7 +142,7 @@ namespace Test.UtilsTools
builder.Append(v9); builder.Append(v9);
break; break;
case TDengineDataType.TSDB_DATA_TYPE_NCHAR: case TDengineDataType.TSDB_DATA_TYPE_NCHAR:
string v10 = Marshal.PtrToStringAnsi(data); string v10 = Marshal.PtrToStringAnsi(data, colLengthArr[fields]);
builder.Append(v10); builder.Append(v10);
break; break;
case TDengineDataType.TSDB_DATA_TYPE_JSONTAG: case TDengineDataType.TSDB_DATA_TYPE_JSONTAG:
...@@ -164,6 +167,117 @@ namespace Test.UtilsTools ...@@ -164,6 +167,117 @@ namespace Test.UtilsTools
TDengine.FreeResult(res); Console.WriteLine(""); TDengine.FreeResult(res); Console.WriteLine("");
} }
public static List<List<string>> GetResultSet(IntPtr res)
{
List<List<string>> result = new List<List<string>>();
List<string> colName = new List<string>();
List<string> dataRaw = new List<string>();
long queryRows = 0;
if (!IsValidResult(res))
{
ExitProgram();
}
int fieldCount = TDengine.FieldCount(res);
List<TDengineMeta> metas = TDengine.FetchFields(res);
for (int j = 0; j < metas.Count; j++)
{
TDengineMeta meta = (TDengineMeta)metas[j];
colName.Add(meta.name);
}
result.Add(colName);
IntPtr rowdata;
while ((rowdata = TDengine.FetchRows(res)) != IntPtr.Zero)
{
queryRows++;
IntPtr colLengthPtr = TDengine.FetchLengths(res);
int[] colLengthArr = new int[fieldCount];
Marshal.Copy(colLengthPtr, colLengthArr, 0, fieldCount);
for (int fields = 0; fields < fieldCount; ++fields)
{
TDengineMeta meta = metas[fields];
int offset = IntPtr.Size * fields;
IntPtr data = Marshal.ReadIntPtr(rowdata, offset);
if (data == IntPtr.Zero)
{
dataRaw.Add("NULL");
continue;
}
switch ((TDengineDataType)meta.type)
{
case TDengineDataType.TSDB_DATA_TYPE_BOOL:
bool v1 = Marshal.ReadByte(data) != 0;
dataRaw.Add(v1.ToString());
break;
case TDengineDataType.TSDB_DATA_TYPE_TINYINT:
byte v2 = Marshal.ReadByte(data);
dataRaw.Add(v2.ToString());
break;
case TDengineDataType.TSDB_DATA_TYPE_SMALLINT:
short v3 = Marshal.ReadInt16(data);
dataRaw.Add(v3.ToString());
break;
case TDengineDataType.TSDB_DATA_TYPE_INT:
int v4 = Marshal.ReadInt32(data);
dataRaw.Add(v4.ToString());
break;
case TDengineDataType.TSDB_DATA_TYPE_BIGINT:
long v5 = Marshal.ReadInt64(data);
dataRaw.Add(v5.ToString());
break;
case TDengineDataType.TSDB_DATA_TYPE_FLOAT:
float v6 = (float)Marshal.PtrToStructure(data, typeof(float));
dataRaw.Add(v6.ToString());
break;
case TDengineDataType.TSDB_DATA_TYPE_DOUBLE:
double v7 = (double)Marshal.PtrToStructure(data, typeof(double));
dataRaw.Add(v7.ToString());
break;
case TDengineDataType.TSDB_DATA_TYPE_BINARY:
string v8 = Marshal.PtrToStringAnsi(data, colLengthArr[fields]);
dataRaw.Add(v8);
break;
case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP:
long v9 = Marshal.ReadInt64(data);
dataRaw.Add(v9.ToString());
break;
case TDengineDataType.TSDB_DATA_TYPE_NCHAR:
string v10 = Marshal.PtrToStringAnsi(data, colLengthArr[fields]);
dataRaw.Add(v10);
break;
}
}
}
result.Add(dataRaw);
if (TDengine.ErrorNo(res) != 0)
{
Console.Write("Query is not complete, Error {0:G}", TDengine.ErrorNo(res), TDengine.Error(res));
}
TDengine.FreeResult(res); Console.WriteLine("");
return result;
}
public static bool IsValidResult(IntPtr res)
{
if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0))
{
if (res != IntPtr.Zero)
{
Console.Write("reason: " + TDengine.Error(res));
return false;
}
Console.WriteLine("");
return false;
}
return true;
}
public static void CloseConnection(IntPtr conn) public static void CloseConnection(IntPtr conn)
{ {
if (conn != IntPtr.Zero) if (conn != IntPtr.Zero)
...@@ -183,6 +297,18 @@ namespace Test.UtilsTools ...@@ -183,6 +297,18 @@ namespace Test.UtilsTools
List<TDengineMeta> metas = TDengine.FetchFields(res); List<TDengineMeta> metas = TDengine.FetchFields(res);
return metas; return metas;
} }
public static void AssertEqual(string expectVal, string actualVal)
{
if (expectVal == actualVal)
{
Console.WriteLine("{0}=={1} pass", expectVal, actualVal);
}
else
{
Console.WriteLine("{0}=={1} failed", expectVal, actualVal);
ExitProgram();
}
}
public static void ExitProgram() public static void ExitProgram()
{ {
TDengine.Cleanup(); TDengine.Cleanup();
......
...@@ -2,10 +2,14 @@ ...@@ -2,10 +2,14 @@
<PropertyGroup> <PropertyGroup>
<TargetFramework>net5.0</TargetFramework> <TargetFramework>net5.0</TargetFramework>
<IsPackable>false</IsPackable> <IsPackable>false</IsPackable>
</PropertyGroup> </PropertyGroup>
<PropertyGroup>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
<DocumentationFile>..\doc\UnitTest.XML</DocumentationFile>
</PropertyGroup>
<ItemGroup> <ItemGroup>
<PackageReference Include="coverlet.msbuild" Version="3.1.0"> <PackageReference Include="coverlet.msbuild" Version="3.1.0">
<IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets> <IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
......
...@@ -443,16 +443,29 @@ public class TSDBResultSetRowData { ...@@ -443,16 +443,29 @@ public class TSDBResultSetRowData {
case 0: { case 0: {
milliseconds = ts; milliseconds = ts;
fracNanoseconds = (int) (ts * 1_000_000 % 1_000_000_000); fracNanoseconds = (int) (ts * 1_000_000 % 1_000_000_000);
fracNanoseconds = fracNanoseconds < 0 ? 1_000_000_000 + fracNanoseconds : fracNanoseconds;
break; break;
} }
case 1: { case 1: {
milliseconds = ts / 1_000; milliseconds = ts / 1_000;
fracNanoseconds = (int) (ts * 1_000 % 1_000_000_000); fracNanoseconds = (int) (ts * 1_000 % 1_000_000_000);
if (fracNanoseconds < 0) {
if (milliseconds == 0) {
milliseconds = -1;
}
fracNanoseconds += 1_000_000_000;
}
break; break;
} }
case 2: { case 2: {
milliseconds = ts / 1_000_000; milliseconds = ts / 1_000_000;
fracNanoseconds = (int) (ts % 1_000_000_000); fracNanoseconds = (int) (ts % 1_000_000_000);
if (fracNanoseconds < 0) {
if (milliseconds == 0) {
milliseconds = -1;
}
fracNanoseconds += 1_000_000_000;
}
break; break;
} }
default: { default: {
......
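The hunk above normalizes pre-1970 timestamps: Java's `%` keeps the sign of the dividend, so for negative epoch values the fractional part comes out negative and must be shifted into `[0, 1_000_000_000)`, with the enclosing millisecond stepped back (the `0 -> -1` special case) so the nanosecond field counts forward from the right second. A sketch of the microsecond branch, written in Python with Java-style truncating arithmetic to mirror the driver code (helper names are hypothetical):

```python
def trunc_div(a, b):
    """Java-style integer division: truncates toward zero."""
    q = a // b
    if q < 0 and q * b != a:
        q += 1
    return q

def java_mod(a, b):
    """Java-style remainder: keeps the sign of the dividend."""
    return a - trunc_div(a, b) * b

def us_epoch_to_parts(ts):
    """Microsecond-precision epoch value -> (milliseconds, fractional nanos)."""
    ms = trunc_div(ts, 1_000)
    frac_ns = java_mod(ts * 1_000, 1_000_000_000)
    if frac_ns < 0:          # negative remainder => value before the epoch
        if ms == 0:
            ms = -1          # step back so nanos counts forward from that second
        frac_ns += 1_000_000_000
    return ms, frac_ns

# Matches the expectations in DatetimeBefore1970Test.usTest below:
assert us_epoch_to_parts(1) == (0, 1_000)            # 1970-01-01 00:00:00.000001
assert us_epoch_to_parts(-1) == (-1, 999_999_000)    # 1969-12-31 23:59:59.999999
```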
package com.taosdata.jdbc.cases; package com.taosdata.jdbc.cases;
import com.taosdata.jdbc.annotation.CatalogRunner;
import com.taosdata.jdbc.annotation.Description;
import com.taosdata.jdbc.annotation.TestTarget;
import com.taosdata.jdbc.utils.TimestampUtil; import com.taosdata.jdbc.utils.TimestampUtil;
import org.junit.*; import org.junit.*;
import org.junit.runner.RunWith;
import java.sql.*; import java.sql.*;
@RunWith(CatalogRunner.class)
@TestTarget(alias = "negative value convert to timestamp", author = "huolibo", version = "2.0.37")
public class DatetimeBefore1970Test { public class DatetimeBefore1970Test {
private static final String host = "127.0.0.1"; private static final String host = "127.0.0.1";
private Connection conn; private Connection conn;
@Test @Test
public void test() throws SQLException { @Description("millisecond")
public void msTest() throws SQLException {
conn = createEnvironment("ms");
long now = System.currentTimeMillis();
try (Statement stmt = conn.createStatement()) { try (Statement stmt = conn.createStatement()) {
// given // given
// before
stmt.executeUpdate("insert into weather(ts) values('1969-12-31 00:00:00.001')");
stmt.executeUpdate("insert into weather(ts) values('1969-12-31 23:59:59.999')"); stmt.executeUpdate("insert into weather(ts) values('1969-12-31 23:59:59.999')");
// zero
stmt.executeUpdate("insert into weather(ts) values('1970-01-01 00:00:00.000')"); stmt.executeUpdate("insert into weather(ts) values('1970-01-01 00:00:00.000')");
stmt.executeUpdate("insert into weather(ts) values('1970-01-01 08:00:00.000')"); //after
stmt.executeUpdate("insert into weather(ts) values('1970-01-01 07:59:59.999')"); stmt.executeUpdate("insert into weather(ts) values('1970-01-01 00:00:00.001')");
stmt.executeUpdate("insert into weather(ts) values(" + now + ")");
ResultSet rs = stmt.executeQuery("select * from weather order by ts asc"); ResultSet rs = stmt.executeQuery("select * from weather order by ts asc");
ResultSetMetaData metaData = rs.getMetaData(); ResultSetMetaData metaData = rs.getMetaData();
Assert.assertEquals(2, metaData.getColumnCount()); Assert.assertEquals(2, metaData.getColumnCount());
...@@ -26,44 +39,221 @@ public class DatetimeBefore1970Test { ...@@ -26,44 +39,221 @@ public class DatetimeBefore1970Test {
rs.next(); rs.next();
// then // then
Timestamp ts = rs.getTimestamp("ts"); Timestamp ts = rs.getTimestamp("ts");
Assert.assertEquals("1969-12-31 23:59:59.999", TimestampUtil.longToDatetime(ts.getTime())); Assert.assertEquals(-24 * 60 * 60 * 1000 + 1, ts.getTime());
// when // when
rs.next(); rs.next();
// then // then
ts = rs.getTimestamp("ts"); ts = rs.getTimestamp("ts");
Assert.assertEquals("1970-01-01 00:00:00.000", TimestampUtil.longToDatetime(ts.getTime())); Assert.assertEquals(-1, ts.getTime());
// when // when
rs.next(); rs.next();
// then // then
ts = rs.getTimestamp("ts"); ts = rs.getTimestamp("ts");
Assert.assertEquals("1970-01-01 08:00:00.000", TimestampUtil.longToDatetime(ts.getTime())); Assert.assertEquals(0, ts.getTime());
// when
rs.next();
// then
ts = rs.getTimestamp("ts");
Assert.assertEquals(1, ts.getTime());
// when
rs.next();
// then
ts = rs.getTimestamp("ts");
Assert.assertEquals(now, ts.getTime());
}
}
@Test
@Description("microsecond")
public void usTest() throws SQLException {
conn = createEnvironment("us");
long now = System.currentTimeMillis();
try (Statement stmt = conn.createStatement()) {
// given
stmt.executeUpdate("insert into weather(ts) values('1969-12-31 00:00:00.000001')");
stmt.executeUpdate("insert into weather(ts) values('1969-12-31 23:59:59.999999')");
stmt.executeUpdate("insert into weather(ts) values('1970-01-01 00:00:00.000000')");
stmt.executeUpdate("insert into weather(ts) values('1970-01-01 00:00:00.000001')");
stmt.executeUpdate("insert into weather(ts) values(" + now + ")");
ResultSet rs = stmt.executeQuery("select * from weather order by ts asc");
ResultSetMetaData metaData = rs.getMetaData();
Assert.assertEquals(2, metaData.getColumnCount());
// when
rs.next();
// then
Timestamp ts = rs.getTimestamp("ts");
Assert.assertEquals(-24 * 60 * 60 * 1000, ts.getTime());
Assert.assertEquals(1_000, ts.getNanos());
// when
rs.next();
// then
ts = rs.getTimestamp("ts");
Assert.assertEquals(-1, ts.getTime());
Assert.assertEquals(999_999_000, ts.getNanos());
// when
rs.next();
// then
ts = rs.getTimestamp("ts");
Assert.assertEquals(0, ts.getTime());
// when
rs.next();
// then
ts = rs.getTimestamp("ts");
Assert.assertEquals(0, ts.getTime());
Assert.assertEquals(1_000, ts.getNanos());
// when
rs.next();
// then
ts = rs.getTimestamp("ts");
String s = String.valueOf(now);
Assert.assertEquals(Long.parseLong(s.substring(0, s.length() - 3)), ts.getTime());
Assert.assertEquals(Long.parseLong(s.substring(s.length() - 6) + "000"), ts.getNanos());
}
}
@Test
@Description("nanosecond")
public void nanoTest() throws SQLException {
conn = createEnvironment("ns");
long now = System.currentTimeMillis() * 1000_000L + System.nanoTime() % 1000_000L;
try (Statement stmt = conn.createStatement()) {
// given
stmt.executeUpdate("insert into weather(ts) values('1969-12-31 00:00:00.000000123')");
stmt.executeUpdate("insert into weather(ts) values('1969-12-31 23:59:59.999999999')");
stmt.executeUpdate("insert into weather(ts) values('1970-01-01 00:00:00.000')");
stmt.executeUpdate("insert into weather(ts) values('1970-01-01 00:00:00.000000001')");
stmt.executeUpdate("insert into weather(ts) values(" + now + ")");
ResultSet rs = stmt.executeQuery("select * from weather order by ts asc");
ResultSetMetaData metaData = rs.getMetaData();
Assert.assertEquals(2, metaData.getColumnCount());
// when
rs.next();
// then
Timestamp ts = rs.getTimestamp("ts");
Assert.assertEquals(-24 * 60 * 60 * 1_000, ts.getTime());
Assert.assertEquals(123, ts.getNanos());
// when
rs.next();
// then
ts = rs.getTimestamp("ts");
Assert.assertEquals(-1, ts.getTime());
Assert.assertEquals(999999999, ts.getNanos());
// when
rs.next();
// then
ts = rs.getTimestamp("ts");
Assert.assertEquals(0, ts.getNanos());
// when
rs.next();
// then
ts = rs.getTimestamp("ts");
Assert.assertEquals(1, ts.getNanos());
// when
rs.next();
// then
ts = rs.getTimestamp("ts");
String s = String.valueOf(now);
Assert.assertEquals(Long.parseLong(s.substring(0, s.length() - 6)), ts.getTime());
Assert.assertEquals(Long.parseLong(s.substring(s.length() - 9)), ts.getNanos());
}
}
@Test
@Ignore
@Description("nanosecond convert timestamp when timezone is asia shanghai")
public void asiaShanghaiTest() throws SQLException {
conn = createEnvironment("ns");
long now = System.currentTimeMillis() * 1000_000L + System.nanoTime() % 1000_000L;
try (Statement stmt = conn.createStatement()) {
// given
stmt.executeUpdate("insert into weather(ts) values('1969-12-31 00:00:00.000000123')");
stmt.executeUpdate("insert into weather(ts) values('1969-12-31 23:59:59.999999999')");
stmt.executeUpdate("insert into weather(ts) values('1970-01-01 00:00:00.000')");
stmt.executeUpdate("insert into weather(ts) values('1970-01-01 00:00:00.000000001')");
stmt.executeUpdate("insert into weather(ts) values(" + now + ")");
ResultSet rs = stmt.executeQuery("select * from weather order by ts asc");
ResultSetMetaData metaData = rs.getMetaData();
Assert.assertEquals(2, metaData.getColumnCount());
// when
rs.next();
// then
Timestamp ts = rs.getTimestamp("ts");
Assert.assertEquals("1969-12-31 08:00:00.000", TimestampUtil.longToDatetime(ts.getTime()));
Assert.assertEquals(123, ts.getNanos());
// when // when
rs.next(); rs.next();
// then // then
ts = rs.getTimestamp("ts"); ts = rs.getTimestamp("ts");
Assert.assertEquals("1970-01-01 07:59:59.999", TimestampUtil.longToDatetime(ts.getTime())); Assert.assertEquals("1970-01-01 07:59:59.999", TimestampUtil.longToDatetime(ts.getTime()));
Assert.assertEquals(999999999, ts.getNanos());
// when
rs.next();
// then
ts = rs.getTimestamp("ts");
Assert.assertEquals("1970-01-01 08:00:00.000", TimestampUtil.longToDatetime(ts.getTime()));
Assert.assertEquals(0, ts.getNanos());
// when
rs.next();
// then
ts = rs.getTimestamp("ts");
Assert.assertEquals("1970-01-01 08:00:00.000", TimestampUtil.longToDatetime(ts.getTime()));
Assert.assertEquals(1, ts.getNanos());
// when
rs.next();
// then
ts = rs.getTimestamp("ts");
String s = String.valueOf(now);
Assert.assertEquals(Long.parseLong(s.substring(0, s.length() - 6)), ts.getTime());
Assert.assertEquals(Long.parseLong(s.substring(s.length() - 9)), ts.getNanos());
} }
} }
@Before private Connection createEnvironment(String precision) throws SQLException {
public void before() throws SQLException { String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata&timezone=UTC";
conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"); String createSql = "create database if not exists test_timestamp keep 36500";
if (!isEmpty(precision)) {
createSql += " precision '" + precision + "'";
}
conn = DriverManager.getConnection(url);
Statement stmt = conn.createStatement(); Statement stmt = conn.createStatement();
stmt.execute("drop database if exists test_timestamp"); stmt.execute("drop database if exists test_timestamp");
stmt.execute("create database if not exists test_timestamp keep 36500"); stmt.execute(createSql);
stmt.execute("use test_timestamp"); stmt.execute("use test_timestamp");
stmt.execute("create table weather(ts timestamp,f1 float)"); stmt.execute("create table weather(ts timestamp,f1 float)");
stmt.close(); stmt.close();
return conn;
}
private boolean isEmpty(String string) {
return null == string || string.trim().equals("");
} }
@After @After
public void after() throws SQLException { public void after() throws SQLException {
Statement stmt = conn.createStatement(); if (conn != null) {
stmt.execute("drop database if exists test_timestamp"); Statement stmt = conn.createStatement();
if (conn != null) stmt.execute("drop database if exists test_timestamp");
stmt.close();
conn.close(); conn.close();
}
} }
} }
...@@ -41,6 +41,7 @@ cursor.execute("show databases") ...@@ -41,6 +41,7 @@ cursor.execute("show databases")
results = cursor.fetchall() results = cursor.fetchall()
for row in results: for row in results:
print(row) print(row)
cursor.close() cursor.close()
conn.close() conn.close()
``` ```
...@@ -57,8 +58,10 @@ result = conn.query("show databases") ...@@ -57,8 +58,10 @@ result = conn.query("show databases")
num_of_fields = result.field_count num_of_fields = result.field_count
for field in result.fields: for field in result.fields:
print(field) print(field)
for row in result: for row in result:
print(row) print(row)
result.close() result.close()
conn.execute("drop database pytest") conn.execute("drop database pytest")
conn.close() conn.close()
...@@ -75,12 +78,13 @@ def fetch_callback(p_param, p_result, num_of_rows): ...@@ -75,12 +78,13 @@ def fetch_callback(p_param, p_result, num_of_rows):
print("fetched ", num_of_rows, "rows") print("fetched ", num_of_rows, "rows")
p = cast(p_param, POINTER(Counter)) p = cast(p_param, POINTER(Counter))
result = TaosResult(p_result) result = TaosResult(p_result)
if num_of_rows == 0: if num_of_rows == 0:
print("fetching completed") print("fetching completed")
p.contents.done = True p.contents.done = True
result.close() result.close()
return return
if num_of_rows < 0: if num_of_rows < 0:
p.contents.done = True p.contents.done = True
result.check_error(num_of_rows) result.check_error(num_of_rows)
...@@ -90,6 +94,7 @@ def fetch_callback(p_param, p_result, num_of_rows): ...@@ -90,6 +94,7 @@ def fetch_callback(p_param, p_result, num_of_rows):
for row in result.rows_iter(num_of_rows): for row in result.rows_iter(num_of_rows):
# print(row) # print(row)
None None
p.contents.count += result.row_count p.contents.count += result.row_count
result.fetch_rows_a(fetch_callback, p_param) result.fetch_rows_a(fetch_callback, p_param)
...@@ -97,17 +102,19 @@ def fetch_callback(p_param, p_result, num_of_rows): ...@@ -97,17 +102,19 @@ def fetch_callback(p_param, p_result, num_of_rows):
def query_callback(p_param, p_result, code): def query_callback(p_param, p_result, code):
# type: (c_void_p, c_void_p, c_int) -> None # type: (c_void_p, c_void_p, c_int) -> None
if p_result == None: if p_result is None:
return return
result = TaosResult(p_result) result = TaosResult(p_result)
if code == 0: if code == 0:
result.fetch_rows_a(fetch_callback, p_param) result.fetch_rows_a(fetch_callback, p_param)
result.check_error(code) result.check_error(code)
class Counter(Structure): class Counter(Structure):
_fields_ = [("count", c_int), ("done", c_bool)] _fields_ = [("count", c_int), ("done", c_bool)]
def __str__(self): def __str__(self):
return "{ count: %d, done: %s }" % (self.count, self.done) return "{ count: %d, done: %s }" % (self.count, self.done)
...@@ -116,10 +123,11 @@ def test_query(conn): ...@@ -116,10 +123,11 @@ def test_query(conn):
# type: (TaosConnection) -> None # type: (TaosConnection) -> None
counter = Counter(count=0) counter = Counter(count=0)
conn.query_a("select * from log.log", query_callback, byref(counter)) conn.query_a("select * from log.log", query_callback, byref(counter))
while not counter.done: while not counter.done:
print("wait query callback") print("wait query callback")
time.sleep(1) time.sleep(1)
print(counter) print(counter)
conn.close() conn.close()
...@@ -182,6 +190,7 @@ result = conn.query("select * from log") ...@@ -182,6 +190,7 @@ result = conn.query("select * from log")
for row in result: for row in result:
print(row) print(row)
result.close() result.close()
stmt.close() stmt.close()
conn.close() conn.close()
...@@ -237,18 +246,20 @@ result.close() ...@@ -237,18 +246,20 @@ result.close()
result = conn.query("select * from log") result = conn.query("select * from log")
for row in result: for row in result:
print(row) print(row)
result.close() result.close()
stmt.close() stmt.close()
conn.close() conn.close()
``` ```
### Statement API - Subscribe ### Subscription
```python ```python
import taos import taos
import random
conn = taos.connect() conn = taos.connect()
dbname = "pytest_taos_subscribe_callback" dbname = "pytest_taos_subscribe"
conn.execute("drop database if exists %s" % dbname) conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s" % dbname) conn.execute("create database if not exists %s" % dbname)
conn.select_db(dbname) conn.select_db(dbname)
...@@ -256,7 +267,7 @@ conn.execute("create table if not exists log(ts timestamp, n int)") ...@@ -256,7 +267,7 @@ conn.execute("create table if not exists log(ts timestamp, n int)")
for i in range(10): for i in range(10):
conn.execute("insert into log values(now, %d)" % i) conn.execute("insert into log values(now, %d)" % i)
sub = conn.subscribe(True, "test", "select * from log", 1000) sub = conn.subscribe(False, "test", "select * from log", 1000)
print("# consume from begin") print("# consume from begin")
for ts, n in sub.consume(): for ts, n in sub.consume():
print(ts, n) print(ts, n)
...@@ -268,9 +279,18 @@ for i in range(5): ...@@ -268,9 +279,18 @@ for i in range(5):
for ts, n in result: for ts, n in result:
print(ts, n) print(ts, n)
sub.close(True)
print("# keep progress consume")
sub = conn.subscribe(False, "test", "select * from log", 1000)
result = sub.consume()
rows = result.fetch_all()
# consuming from the saved subscription progress needs root privilege (for /var/lib/taos).
assert result.row_count == 0
print("## consumed ", len(rows), "rows")
print("# consume with a stop condition") print("# consume with a stop condition")
for i in range(10): for i in range(10):
conn.execute("insert into log values(now, %d)" % int(random() * 10)) conn.execute("insert into log values(now, %d)" % random.randint(0, 10))
result = sub.consume() result = sub.consume()
try: try:
ts, n = next(result) ts, n = next(result)
...@@ -283,12 +303,13 @@ for i in range(10): ...@@ -283,12 +303,13 @@ for i in range(10):
continue continue
sub.close() sub.close()
# sub.close()
conn.execute("drop database if exists %s" % dbname) conn.execute("drop database if exists %s" % dbname)
conn.close() # conn.close()
``` ```
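The first argument of `subscribe` is the restart flag, which is why the example now passes `False` after a `sub.close(True)`: `True` replays the query from the beginning, while `False` resumes from the progress kept by the earlier close. A minimal sketch of the two modes, assuming the `log` table and topic name from the example above:

```python
import taos

conn = taos.connect(database="pytest_taos_subscribe")
# restart=True: ignore any saved progress for topic "test", replay from row one.
sub = conn.subscribe(True, "test", "select * from log", 1000)
for ts, n in sub.consume():
    print(ts, n)
sub.close(True)   # True keeps the progress file, as in the example above
# restart=False: resume exactly where the kept progress left off.
sub = conn.subscribe(False, "test", "select * from log", 1000)
sub.close(True)
conn.close()
```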
### Statement API - Subscribe asynchronously with callback ### Asynchronous subscription with callback
```python ```python
from taos import * from taos import *
...@@ -300,7 +321,7 @@ import time ...@@ -300,7 +321,7 @@ import time
def subscribe_callback(p_sub, p_result, p_param, errno): def subscribe_callback(p_sub, p_result, p_param, errno):
# type: (c_void_p, c_void_p, c_void_p, c_int) -> None # type: (c_void_p, c_void_p, c_void_p, c_int) -> None
print("# fetch in callback") print("# fetch in callback")
result = TaosResult(p_result) result = TaosResult(c_void_p(p_result))
result.check_error(errno) result.check_error(errno)
for row in result.rows_iter(): for row in result.rows_iter():
ts, n = row() ts, n = row()
...@@ -311,42 +332,45 @@ def test_subscribe_callback(conn): ...@@ -311,42 +332,45 @@ def test_subscribe_callback(conn):
# type: (TaosConnection) -> None # type: (TaosConnection) -> None
dbname = "pytest_taos_subscribe_callback" dbname = "pytest_taos_subscribe_callback"
try: try:
print("drop if exists")
conn.execute("drop database if exists %s" % dbname) conn.execute("drop database if exists %s" % dbname)
print("create database")
conn.execute("create database if not exists %s" % dbname) conn.execute("create database if not exists %s" % dbname)
conn.select_db(dbname) print("create table")
conn.execute("create table if not exists log(ts timestamp, n int)") # conn.execute("use %s" % dbname)
conn.execute("create table if not exists %s.log(ts timestamp, n int)" % dbname)
print("# subscribe with callback") print("# subscribe with callback")
sub = conn.subscribe(False, "test", "select * from log", 1000, subscribe_callback) sub = conn.subscribe(False, "test", "select * from %s.log" % dbname, 1000, subscribe_callback)
for i in range(10): for i in range(10):
conn.execute("insert into log values(now, %d)" % i) conn.execute("insert into %s.log values(now, %d)" % (dbname, i))
time.sleep(0.7) time.sleep(0.7)
sub.close() sub.close()
conn.execute("drop database if exists %s" % dbname) conn.execute("drop database if exists %s" % dbname)
conn.close() # conn.close()
except Exception as err: except Exception as err:
conn.execute("drop database if exists %s" % dbname) conn.execute("drop database if exists %s" % dbname)
conn.close() # conn.close()
raise err raise err
if __name__ == "__main__": if __name__ == "__main__":
test_subscribe_callback(connect()) test_subscribe_callback(connect())
``` ```
### Statement API - Stream ### Stream
```python ```python
from taos import * from taos import *
from ctypes import * from ctypes import *
import time
def stream_callback(p_param, p_result, p_row): def stream_callback(p_param, p_result, p_row):
# type: (c_void_p, c_void_p, c_void_p) -> None # type: (c_void_p, c_void_p, c_void_p) -> None
if p_result is None or p_row is None:
if p_result == None or p_row == None:
return return
result = TaosResult(p_result) result = TaosResult(p_result)
row = TaosRow(result, p_row) row = TaosRow(result, p_row)
...@@ -355,13 +379,12 @@ def stream_callback(p_param, p_result, p_row): ...@@ -355,13 +379,12 @@ def stream_callback(p_param, p_result, p_row):
p = cast(p_param, POINTER(Counter)) p = cast(p_param, POINTER(Counter))
p.contents.count += count p.contents.count += count
print("[%s] inserted %d in 5s, total count: %d" % (ts.strftime("%Y-%m-%d %H:%M:%S"), count, p.contents.count)) print("[%s] inserted %d in 5s, total count: %d" % (ts.strftime("%Y-%m-%d %H:%M:%S"), count, p.contents.count))
except Exception as err: except Exception as err:
print(err) print(err)
raise err raise err
class Counter(ctypes.Structure): class Counter(Structure):
_fields_ = [ _fields_ = [
("count", c_int), ("count", c_int),
] ]
...@@ -378,16 +401,17 @@ def test_stream(conn): ...@@ -378,16 +401,17 @@ def test_stream(conn):
conn.execute("create database if not exists %s" % dbname) conn.execute("create database if not exists %s" % dbname)
conn.select_db(dbname) conn.select_db(dbname)
conn.execute("create table if not exists log(ts timestamp, n int)") conn.execute("create table if not exists log(ts timestamp, n int)")
result = conn.query("select count(*) from log interval(5s)") result = conn.query("select count(*) from log interval(5s)")
assert result.field_count == 2 assert result.field_count == 2
counter = Counter() counter = Counter()
counter.count = 0 counter.count = 0
stream = conn.stream("select count(*) from log interval(5s)", stream_callback, param=byref(counter)) stream = conn.stream("select count(*) from log interval(5s)", stream_callback, param=byref(counter))
for _ in range(0, 20): for _ in range(0, 20):
conn.execute("insert into log values(now,0)(now+1s, 1)(now + 2s, 2)") conn.execute("insert into log values(now,0)(now+1s, 1)(now + 2s, 2)")
time.sleep(2) time.sleep(2)
stream.close() stream.close()
conn.execute("drop database if exists %s" % dbname) conn.execute("drop database if exists %s" % dbname)
conn.close() conn.close()
...@@ -399,12 +423,14 @@ def test_stream(conn): ...@@ -399,12 +423,14 @@ def test_stream(conn):
if __name__ == "__main__": if __name__ == "__main__":
test_stream(connect()) test_stream(connect())
``` ```
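The `param=byref(counter)` round-trip is the reason `Counter` must be a `ctypes.Structure`: the stream API carries it through the C layer as an opaque `void *`, and `cast(p_param, POINTER(Counter))` inside the callback recovers a typed view of the same memory. A self-contained sketch of just that mechanism, no server required:

```python
from ctypes import POINTER, Structure, addressof, c_int, c_void_p, cast

class Counter(Structure):
    _fields_ = [("count", c_int)]

counter = Counter(count=0)
opaque = c_void_p(addressof(counter))   # the void * the C layer would carry
view = cast(opaque, POINTER(Counter))   # the recovery step from stream_callback
view.contents.count += 3
print(counter.count)                    # 3 -- both names refer to one struct
```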
### Insert with line protocol ### Insert with line protocol
```python ```python
import taos import taos
from taos import SmlProtocol, SmlPrecision
conn = taos.connect() conn = taos.connect()
dbname = "pytest_line" dbname = "pytest_line"
...@@ -413,29 +439,22 @@ conn.execute("create database if not exists %s precision 'us'" % dbname) ...@@ -413,29 +439,22 @@ conn.execute("create database if not exists %s precision 'us'" % dbname)
conn.select_db(dbname) conn.select_db(dbname)
lines = [ lines = [
'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000ns', 'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000',
'st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"pass it again",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000',
'stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"pass it again_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000',
] ]
conn.schemaless_insert(lines, 0, "ns") conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED)
print("inserted") print("inserted")
lines = [ conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED)
'stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"pass it again_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000',
]
conn.schemaless_insert(lines, 0, "ns")
result = conn.query("show tables") result = conn.query("show tables")
for row in result: for row in result:
print(row) print(row)
result.close()
conn.execute("drop database if exists %s" % dbname) conn.execute("drop database if exists %s" % dbname)
conn.close()
``` ```
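The rewritten example reflects the newer `schemaless_insert` signature: protocol and timestamp precision travel as `SmlProtocol`/`SmlPrecision` enum values instead of the old positional `0, "ns"`, and the per-timestamp unit suffix (`...000ns`) is dropped from the line itself. A minimal sketch of the call shape, using only the enum members shown above and a hypothetical database name:

```python
import taos
from taos import SmlProtocol, SmlPrecision

conn = taos.connect()
conn.execute("create database if not exists pytest_line_sketch precision 'us'")
conn.select_db("pytest_line_sketch")

lines = ['st,t1=3i64,t2=4f64 c1=3i64,c2=false 1626006833639000000']
# Protocol and precision are explicit arguments now; NOT_CONFIGURED leaves
# precision detection to the library, as in the example above.
conn.schemaless_insert(lines, SmlProtocol.LINE_PROTOCOL, SmlPrecision.NOT_CONFIGURED)

conn.execute("drop database if exists pytest_line_sketch")
conn.close()
```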
## License - AGPL-3.0 ## License
Keep same with [TDengine](https://github.com/taosdata/TDengine). We use the MIT license for the Python connector.
...@@ -29,7 +29,7 @@ def fetch_callback(p_param, p_result, num_of_rows): ...@@ -29,7 +29,7 @@ def fetch_callback(p_param, p_result, num_of_rows):
def query_callback(p_param, p_result, code): def query_callback(p_param, p_result, code):
# type: (c_void_p, c_void_p, c_int) -> None # type: (c_void_p, c_void_p, c_int) -> None
if p_result == None: if p_result is None:
return return
result = TaosResult(p_result) result = TaosResult(p_result)
if code == 0: if code == 0:
......
from taos import *
from ctypes import *
import time
def stream_callback(p_param, p_result, p_row):
# type: (c_void_p, c_void_p, c_void_p) -> None
if p_result is None or p_row is None:
return
result = TaosResult(p_result)
row = TaosRow(result, p_row)
try:
ts, count = row()
p = cast(p_param, POINTER(Counter))
p.contents.count += count
print("[%s] inserted %d in 5s, total count: %d" % (ts.strftime("%Y-%m-%d %H:%M:%S"), count, p.contents.count))
except Exception as err:
print(err)
raise err
class Counter(Structure):
_fields_ = [
("count", c_int),
]
def __str__(self):
return "%d" % self.count
def test_stream(conn):
# type: (TaosConnection) -> None
dbname = "pytest_taos_stream"
try:
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s" % dbname)
conn.select_db(dbname)
conn.execute("create table if not exists log(ts timestamp, n int)")
result = conn.query("select count(*) from log interval(5s)")
assert result.field_count == 2
counter = Counter()
counter.count = 0
stream = conn.stream("select count(*) from log interval(5s)", stream_callback, param=byref(counter))
for _ in range(0, 20):
conn.execute("insert into log values(now,0)(now+1s, 1)(now + 2s, 2)")
time.sleep(2)
stream.close()
conn.execute("drop database if exists %s" % dbname)
conn.close()
except Exception as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
raise err
if __name__ == "__main__":
test_stream(connect())
...@@ -86,7 +86,7 @@ def fetch_callback(p_param, p_result, num_of_rows): ...@@ -86,7 +86,7 @@ def fetch_callback(p_param, p_result, num_of_rows):
def query_callback(p_param, p_result, code): def query_callback(p_param, p_result, code):
# type: (c_void_p, c_void_p, c_int) -> None # type: (c_void_p, c_void_p, c_int) -> None
if p_result == None: if p_result is None:
return return
result = TaosResult(p_result) result = TaosResult(p_result)
if code == 0: if code == 0:
...@@ -335,7 +335,7 @@ from ctypes import * ...@@ -335,7 +335,7 @@ from ctypes import *
def stream_callback(p_param, p_result, p_row): def stream_callback(p_param, p_result, p_row):
# type: (c_void_p, c_void_p, c_void_p) -> None # type: (c_void_p, c_void_p, c_void_p) -> None
if p_result == None or p_row == None: if p_result is None or p_row is None:
return return
result = TaosResult(p_result) result = TaosResult(p_result)
row = TaosRow(result, p_row) row = TaosRow(result, p_row)
......
...@@ -317,7 +317,7 @@ class TaosMultiBind(ctypes.Structure): ...@@ -317,7 +317,7 @@ class TaosMultiBind(ctypes.Structure):
def _str_to_buffer(self, values): def _str_to_buffer(self, values):
self.num = len(values) self.num = len(values)
is_null = [1 if v == None else 0 for v in values] is_null = [1 if v is None else 0 for v in values]
self.is_null = cast((c_byte * self.num)(*is_null), c_char_p) self.is_null = cast((c_byte * self.num)(*is_null), c_char_p)
if sum(is_null) == self.num: if sum(is_null) == self.num:
......
...@@ -373,9 +373,9 @@ def taos_fetch_block(result, fields=None, field_count=None): ...@@ -373,9 +373,9 @@ def taos_fetch_block(result, fields=None, field_count=None):
if num_of_rows == 0: if num_of_rows == 0:
return None, 0 return None, 0
precision = taos_result_precision(result) precision = taos_result_precision(result)
if fields == None: if fields is None:
fields = taos_fetch_fields(result) fields = taos_fetch_fields(result)
if field_count == None: if field_count is None:
field_count = taos_field_count(result) field_count = taos_field_count(result)
blocks = [None] * field_count blocks = [None] * field_count
fieldLen = taos_fetch_lengths(result, field_count) fieldLen = taos_fetch_lengths(result, field_count)
...@@ -466,7 +466,7 @@ def taos_fetch_lengths(result, field_count=None): ...@@ -466,7 +466,7 @@ def taos_fetch_lengths(result, field_count=None):
# type: (c_void_p, int) -> Array[int] # type: (c_void_p, int) -> Array[int]
"""Make sure to call taos_fetch_row or taos_fetch_block before fetch_lengths""" """Make sure to call taos_fetch_row or taos_fetch_block before fetch_lengths"""
lens = _libtaos.taos_fetch_lengths(result) lens = _libtaos.taos_fetch_lengths(result)
if field_count == None: if field_count is None:
field_count = taos_field_count(result) field_count = taos_field_count(result)
if not lens: if not lens:
raise OperationalError("field length empty, use taos_fetch_row/block before it") raise OperationalError("field length empty, use taos_fetch_row/block before it")
...@@ -823,7 +823,7 @@ def taos_stmt_use_result(stmt): ...@@ -823,7 +823,7 @@ def taos_stmt_use_result(stmt):
@stmt: TAOS_STMT* @stmt: TAOS_STMT*
""" """
result = c_void_p(_libtaos.taos_stmt_use_result(stmt)) result = c_void_p(_libtaos.taos_stmt_use_result(stmt))
if result == None: if result is None:
raise StatementError(taos_stmt_errstr(stmt)) raise StatementError(taos_stmt_errstr(stmt))
return result return result
......
...@@ -41,7 +41,7 @@ class TaosResult(object): ...@@ -41,7 +41,7 @@ class TaosResult(object):
if self._result is None or self.fields is None: if self._result is None or self.fields is None:
raise OperationalError("Invalid use of fetch iterator") raise OperationalError("Invalid use of fetch iterator")
if self._block == None or self._block_iter >= self._block_length: if self._block is None or self._block_iter >= self._block_length:
self._block, self._block_length = self.fetch_block() self._block, self._block_length = self.fetch_block()
self._block_iter = 0 self._block_iter = 0
# self._row_count += self._block_length # self._row_count += self._block_length
...@@ -55,7 +55,7 @@ class TaosResult(object): ...@@ -55,7 +55,7 @@ class TaosResult(object):
"""fields definitions of the current result""" """fields definitions of the current result"""
if self._result is None: if self._result is None:
raise ResultError("no result object setted") raise ResultError("no result object setted")
if self._fields == None: if self._fields is None:
self._fields = taos_fetch_fields(self._result) self._fields = taos_fetch_fields(self._result)
return self._fields return self._fields
...@@ -72,7 +72,7 @@ class TaosResult(object): ...@@ -72,7 +72,7 @@ class TaosResult(object):
@property @property
def precision(self): def precision(self):
if self._precision == None: if self._precision is None:
self._precision = taos_result_precision(self._result) self._precision = taos_result_precision(self._result)
return self._precision return self._precision
...@@ -114,7 +114,7 @@ class TaosResult(object): ...@@ -114,7 +114,7 @@ class TaosResult(object):
if self._result is None: if self._result is None:
raise OperationalError("Invalid use of fetchall") raise OperationalError("Invalid use of fetchall")
if self._fields == None: if self._fields is None:
self._fields = taos_fetch_fields(self._result) self._fields = taos_fetch_fields(self._result)
buffer = [[] for i in range(len(self._fields))] buffer = [[] for i in range(len(self._fields))]
self._row_count = 0 self._row_count = 0
...@@ -150,7 +150,7 @@ class TaosResult(object): ...@@ -150,7 +150,7 @@ class TaosResult(object):
return taos_errstr(self._result) return taos_errstr(self._result)
def check_error(self, errno=None, close=True): def check_error(self, errno=None, close=True):
if errno == None: if errno is None:
errno = self.errno() errno = self.errno()
if errno != 0: if errno != 0:
msg = self.errstr() msg = self.errstr()
......
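The hunks above replace `== None` with `is None` throughout the connector. Beyond PEP 8 style, there is a practical reason: `==` dispatches to the left operand's `__eq__`, which wrapper types can override or make expensive, while `is` is an identity test nothing can intercept. A tiny illustration:

```python
class Chatty:
    def __eq__(self, other):
        # A misbehaving (or merely costly) equality hook.
        return True

c = Chatty()
print(c == None)   # True  -- __eq__ hijacked the comparison
print(c is None)   # False -- identity cannot be overridden
```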
...@@ -274,7 +274,6 @@ int32_t* taosGetErrno(); ...@@ -274,7 +274,6 @@ int32_t* taosGetErrno();
#define TSDB_CODE_TDB_IVLD_TAG_VAL TAOS_DEF_ERROR_CODE(0, 0x0615) //"TSDB invalid tag value") #define TSDB_CODE_TDB_IVLD_TAG_VAL TAOS_DEF_ERROR_CODE(0, 0x0615) //"TSDB invalid tag value")
#define TSDB_CODE_TDB_NO_CACHE_LAST_ROW TAOS_DEF_ERROR_CODE(0, 0x0616) //"TSDB no cache last row data") #define TSDB_CODE_TDB_NO_CACHE_LAST_ROW TAOS_DEF_ERROR_CODE(0, 0x0616) //"TSDB no cache last row data")
#define TSDB_CODE_TDB_INCOMPLETE_DFILESET TAOS_DEF_ERROR_CODE(0, 0x0617) //"TSDB incomplete DFileSet") #define TSDB_CODE_TDB_INCOMPLETE_DFILESET TAOS_DEF_ERROR_CODE(0, 0x0617) //"TSDB incomplete DFileSet")
#define TSDB_CODE_TDB_NO_JSON_TAG_KEY TAOS_DEF_ERROR_CODE(0, 0x0618) //"TSDB no tag json key")
// query // query
#define TSDB_CODE_QRY_INVALID_QHANDLE TAOS_DEF_ERROR_CODE(0, 0x0700) //"Invalid handle") #define TSDB_CODE_QRY_INVALID_QHANDLE TAOS_DEF_ERROR_CODE(0, 0x0700) //"Invalid handle")
......
...@@ -32,6 +32,10 @@ ELSEIF (TD_WINDOWS) ...@@ -32,6 +32,10 @@ ELSEIF (TD_WINDOWS)
LIST(APPEND SRC ./src/shellMain.c) LIST(APPEND SRC ./src/shellMain.c)
LIST(APPEND SRC ./src/shellWindows.c) LIST(APPEND SRC ./src/shellWindows.c)
ADD_EXECUTABLE(shell ${SRC}) ADD_EXECUTABLE(shell ${SRC})
IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
ADD_DEPENDENCIES(shell jemalloc)
ENDIF ()
TARGET_LINK_LIBRARIES(shell taos_static cJson) TARGET_LINK_LIBRARIES(shell taos_static cJson)
IF (TD_POWER) IF (TD_POWER)
......
Subproject commit beca4813316f254624d8dbecf54d45a5a232c61d Subproject commit 14a23779d24a9571cdb7165bea2b0208d54c53ad
...@@ -8,3 +8,8 @@ INCLUDE_DIRECTORIES(inc) ...@@ -8,3 +8,8 @@ INCLUDE_DIRECTORIES(inc)
AUX_SOURCE_DIRECTORY(src SRC) AUX_SOURCE_DIRECTORY(src SRC)
ADD_LIBRARY(mnode ${SRC}) ADD_LIBRARY(mnode ${SRC})
IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
ADD_DEPENDENCIES(mnode jemalloc)
ENDIF ()
...@@ -11,6 +11,11 @@ ADD_LIBRARY(os ${SRC}) ...@@ -11,6 +11,11 @@ ADD_LIBRARY(os ${SRC})
IF (TD_LINUX) IF (TD_LINUX)
TARGET_LINK_LIBRARIES(os oslinux) TARGET_LINK_LIBRARIES(os oslinux)
IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
ADD_DEPENDENCIES(os jemalloc)
ENDIF ()
IF (TD_ARM_32 OR TD_LINUX_32) IF (TD_ARM_32 OR TD_LINUX_32)
TARGET_LINK_LIBRARIES(os atomic) TARGET_LINK_LIBRARIES(os atomic)
ENDIF () ENDIF ()
......
...@@ -4,4 +4,9 @@ PROJECT(TDengine) ...@@ -4,4 +4,9 @@ PROJECT(TDengine)
AUX_SOURCE_DIRECTORY(. SRC) AUX_SOURCE_DIRECTORY(. SRC)
ADD_LIBRARY(oslinux ${SRC}) ADD_LIBRARY(oslinux ${SRC})
TARGET_LINK_LIBRARIES(oslinux m rt z dl) TARGET_LINK_LIBRARIES(oslinux m rt z dl)
\ No newline at end of file
IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
ADD_DEPENDENCIES(oslinux jemalloc)
ENDIF ()
Subproject commit 826f3d3b7820a5c007d301854d56db003b424d0a Subproject commit 11d1e02255edfeeaa8d5b1f45abfa9637332ce65
...@@ -238,7 +238,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI ...@@ -238,7 +238,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
// (uid, tid) + VGID + TAGSIZE + VARSTR_HEADER_SIZE // (uid, tid) + VGID + TAGSIZE + VARSTR_HEADER_SIZE
if (functionId == TSDB_FUNC_TID_TAG) { // todo use struct if (functionId == TSDB_FUNC_TID_TAG) { // todo use struct
*type = TSDB_DATA_TYPE_BINARY; *type = TSDB_DATA_TYPE_BINARY;
...@@ -253,7 +253,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI ...@@ -253,7 +253,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
*interBytes = 0; *interBytes = 0;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
if (functionId == TSDB_FUNC_COUNT) { if (functionId == TSDB_FUNC_COUNT) {
*type = TSDB_DATA_TYPE_BIGINT; *type = TSDB_DATA_TYPE_BIGINT;
*bytes = sizeof(int64_t); *bytes = sizeof(int64_t);
...@@ -261,7 +261,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI ...@@ -261,7 +261,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
if (functionId == TSDB_FUNC_TS_COMP) { if (functionId == TSDB_FUNC_TS_COMP) {
*type = TSDB_DATA_TYPE_BINARY; *type = TSDB_DATA_TYPE_BINARY;
*bytes = 1; // this results is compressed ts data, only one byte *bytes = 1; // this results is compressed ts data, only one byte
...@@ -316,20 +316,20 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI ...@@ -316,20 +316,20 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
*type = TSDB_DATA_TYPE_BINARY; *type = TSDB_DATA_TYPE_BINARY;
*bytes = (dataBytes + DATA_SET_FLAG_SIZE); *bytes = (dataBytes + DATA_SET_FLAG_SIZE);
*interBytes = *bytes; *interBytes = *bytes;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} else if (functionId == TSDB_FUNC_SUM) { } else if (functionId == TSDB_FUNC_SUM) {
*type = TSDB_DATA_TYPE_BINARY; *type = TSDB_DATA_TYPE_BINARY;
*bytes = sizeof(SSumInfo); *bytes = sizeof(SSumInfo);
*interBytes = *bytes; *interBytes = *bytes;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} else if (functionId == TSDB_FUNC_AVG) { } else if (functionId == TSDB_FUNC_AVG) {
*type = TSDB_DATA_TYPE_BINARY; *type = TSDB_DATA_TYPE_BINARY;
*bytes = sizeof(SAvgInfo); *bytes = sizeof(SAvgInfo);
*interBytes = *bytes; *interBytes = *bytes;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} else if (functionId >= TSDB_FUNC_RATE && functionId <= TSDB_FUNC_IRATE) { } else if (functionId >= TSDB_FUNC_RATE && functionId <= TSDB_FUNC_IRATE) {
*type = TSDB_DATA_TYPE_DOUBLE; *type = TSDB_DATA_TYPE_DOUBLE;
*bytes = sizeof(SRateInfo); *bytes = sizeof(SRateInfo);
...@@ -339,7 +339,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI ...@@ -339,7 +339,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
*type = TSDB_DATA_TYPE_BINARY; *type = TSDB_DATA_TYPE_BINARY;
*bytes = (sizeof(STopBotInfo) + (sizeof(tValuePair) + POINTER_BYTES + extLength) * param); *bytes = (sizeof(STopBotInfo) + (sizeof(tValuePair) + POINTER_BYTES + extLength) * param);
*interBytes = *bytes; *interBytes = *bytes;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} else if (functionId == TSDB_FUNC_SAMPLE) { } else if (functionId == TSDB_FUNC_SAMPLE) {
*type = TSDB_DATA_TYPE_BINARY; *type = TSDB_DATA_TYPE_BINARY;
...@@ -351,7 +351,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI ...@@ -351,7 +351,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
*type = TSDB_DATA_TYPE_BINARY; *type = TSDB_DATA_TYPE_BINARY;
*bytes = sizeof(SSpreadInfo); *bytes = sizeof(SSpreadInfo);
*interBytes = *bytes; *interBytes = *bytes;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} else if (functionId == TSDB_FUNC_APERCT) { } else if (functionId == TSDB_FUNC_APERCT) {
*type = TSDB_DATA_TYPE_BINARY; *type = TSDB_DATA_TYPE_BINARY;
...@@ -359,13 +359,13 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI ...@@ -359,13 +359,13 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
int32_t bytesDigest = (int32_t) (sizeof(SAPercentileInfo) + TDIGEST_SIZE(COMPRESSION)); int32_t bytesDigest = (int32_t) (sizeof(SAPercentileInfo) + TDIGEST_SIZE(COMPRESSION));
*bytes = MAX(bytesHist, bytesDigest); *bytes = MAX(bytesHist, bytesDigest);
*interBytes = *bytes; *interBytes = *bytes;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} else if (functionId == TSDB_FUNC_LAST_ROW) { } else if (functionId == TSDB_FUNC_LAST_ROW) {
*type = TSDB_DATA_TYPE_BINARY; *type = TSDB_DATA_TYPE_BINARY;
*bytes = (sizeof(SLastrowInfo) + dataBytes); *bytes = (sizeof(SLastrowInfo) + dataBytes);
*interBytes = *bytes; *interBytes = *bytes;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} else if (functionId == TSDB_FUNC_TWA) { } else if (functionId == TSDB_FUNC_TWA) {
*type = TSDB_DATA_TYPE_DOUBLE; *type = TSDB_DATA_TYPE_DOUBLE;
...@@ -388,7 +388,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI ...@@ -388,7 +388,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
} else { } else {
*type = TSDB_DATA_TYPE_DOUBLE; *type = TSDB_DATA_TYPE_DOUBLE;
} }
*bytes = sizeof(int64_t); *bytes = sizeof(int64_t);
*interBytes = sizeof(SSumInfo); *interBytes = sizeof(SSumInfo);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
...@@ -458,9 +458,9 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI ...@@ -458,9 +458,9 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
} else if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) { } else if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) {
*type = (int16_t)dataType; *type = (int16_t)dataType;
*bytes = dataBytes; *bytes = dataBytes;
size_t size = sizeof(STopBotInfo) + (sizeof(tValuePair) + POINTER_BYTES + extLength) * param; size_t size = sizeof(STopBotInfo) + (sizeof(tValuePair) + POINTER_BYTES + extLength) * param;
// the output column may be larger than sizeof(STopBotInfo) // the output column may be larger than sizeof(STopBotInfo)
*interBytes = (int32_t)size; *interBytes = (int32_t)size;
} else if (functionId == TSDB_FUNC_SAMPLE) { } else if (functionId == TSDB_FUNC_SAMPLE) {
...@@ -484,7 +484,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI ...@@ -484,7 +484,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
} else { } else {
return TSDB_CODE_TSC_INVALID_OPERATION; return TSDB_CODE_TSC_INVALID_OPERATION;
} }
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
...@@ -501,7 +501,7 @@ int32_t isValidFunction(const char* name, int32_t len) { ...@@ -501,7 +501,7 @@ int32_t isValidFunction(const char* name, int32_t len) {
return aScalarFunctions[i].functionId; return aScalarFunctions[i].functionId;
} }
} }
for(int32_t i = 0; i <= TSDB_FUNC_ELAPSED; ++i) { for(int32_t i = 0; i <= TSDB_FUNC_ELAPSED; ++i) {
int32_t nameLen = (int32_t) strlen(aAggs[i].name); int32_t nameLen = (int32_t) strlen(aAggs[i].name);
if (len != nameLen) { if (len != nameLen) {
...@@ -519,7 +519,7 @@ static bool function_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultInfo ...@@ -519,7 +519,7 @@ static bool function_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultInfo
if (pResultInfo->initialized) { if (pResultInfo->initialized) {
return false; return false;
} }
memset(pCtx->pOutput, 0, (size_t)pCtx->outputBytes); memset(pCtx->pOutput, 0, (size_t)pCtx->outputBytes);
initResultInfo(pResultInfo, pCtx->interBufBytes); initResultInfo(pResultInfo, pCtx->interBufBytes);
return true; return true;
...@@ -537,7 +537,7 @@ static void function_finalizer(SQLFunctionCtx *pCtx) { ...@@ -537,7 +537,7 @@ static void function_finalizer(SQLFunctionCtx *pCtx) {
if (pResInfo->hasResult != DATA_SET_FLAG) { if (pResInfo->hasResult != DATA_SET_FLAG) {
setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes); setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes);
} }
doFinalizer(pCtx); doFinalizer(pCtx);
} }
...@@ -547,7 +547,7 @@ static void function_finalizer(SQLFunctionCtx *pCtx) { ...@@ -547,7 +547,7 @@ static void function_finalizer(SQLFunctionCtx *pCtx) {
*/ */
static void count_function(SQLFunctionCtx *pCtx) { static void count_function(SQLFunctionCtx *pCtx) {
int32_t numOfElem = 0; int32_t numOfElem = 0;
/* /*
* 1. column data missing (schema modified) causes pCtx->hasNull == true. pCtx->preAggVals.isSet == true; * 1. column data missing (schema modified) causes pCtx->hasNull == true. pCtx->preAggVals.isSet == true;
* 2. for general non-primary key columns, pCtx->hasNull may be true or false, pCtx->preAggVals.isSet == true; * 2. for general non-primary key columns, pCtx->hasNull may be true or false, pCtx->preAggVals.isSet == true;
...@@ -562,7 +562,7 @@ static void count_function(SQLFunctionCtx *pCtx) { ...@@ -562,7 +562,7 @@ static void count_function(SQLFunctionCtx *pCtx) {
if (isNull(val, pCtx->inputType)) { if (isNull(val, pCtx->inputType)) {
continue; continue;
} }
numOfElem += 1; numOfElem += 1;
} }
} else { } else {
...@@ -570,11 +570,11 @@ static void count_function(SQLFunctionCtx *pCtx) { ...@@ -570,11 +570,11 @@ static void count_function(SQLFunctionCtx *pCtx) {
numOfElem = pCtx->size; numOfElem = pCtx->size;
} }
} }
if (numOfElem > 0) { if (numOfElem > 0) {
GET_RES_INFO(pCtx)->hasResult = DATA_SET_FLAG; GET_RES_INFO(pCtx)->hasResult = DATA_SET_FLAG;
} }
*((int64_t *)pCtx->pOutput) += numOfElem; *((int64_t *)pCtx->pOutput) += numOfElem;
SET_VAL(pCtx, numOfElem, 1); SET_VAL(pCtx, numOfElem, 1);
} }
...@@ -584,7 +584,7 @@ static void count_func_merge(SQLFunctionCtx *pCtx) { ...@@ -584,7 +584,7 @@ static void count_func_merge(SQLFunctionCtx *pCtx) {
for (int32_t i = 0; i < pCtx->size; ++i) { for (int32_t i = 0; i < pCtx->size; ++i) {
*((int64_t *)pCtx->pOutput) += pData[i]; *((int64_t *)pCtx->pOutput) += pData[i];
} }
SET_VAL(pCtx, pCtx->size, 1); SET_VAL(pCtx, pCtx->size, 1);
} }
...@@ -679,12 +679,12 @@ int32_t noDataRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t colId) { ...@@ -679,12 +679,12 @@ int32_t noDataRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t colId) {
static void do_sum(SQLFunctionCtx *pCtx) { static void do_sum(SQLFunctionCtx *pCtx) {
int32_t notNullElems = 0; int32_t notNullElems = 0;
// Only the pre-computed information is loaded; the actual data is not loaded // Only the pre-computed information is loaded; the actual data is not loaded
if (pCtx->preAggVals.isSet) { if (pCtx->preAggVals.isSet) {
notNullElems = pCtx->size - pCtx->preAggVals.statis.numOfNull; notNullElems = pCtx->size - pCtx->preAggVals.statis.numOfNull;
assert(pCtx->size >= pCtx->preAggVals.statis.numOfNull); assert(pCtx->size >= pCtx->preAggVals.statis.numOfNull);
if (IS_SIGNED_NUMERIC_TYPE(pCtx->inputType)) { if (IS_SIGNED_NUMERIC_TYPE(pCtx->inputType)) {
int64_t *retVal = (int64_t *)pCtx->pOutput; int64_t *retVal = (int64_t *)pCtx->pOutput;
*retVal += pCtx->preAggVals.statis.sum; *retVal += pCtx->preAggVals.statis.sum;
...@@ -731,10 +731,10 @@ static void do_sum(SQLFunctionCtx *pCtx) { ...@@ -731,10 +731,10 @@ static void do_sum(SQLFunctionCtx *pCtx) {
LIST_ADD_N_DOUBLE_FLOAT(*retVal, pCtx, pData, float, notNullElems, pCtx->inputType); LIST_ADD_N_DOUBLE_FLOAT(*retVal, pCtx, pData, float, notNullElems, pCtx->inputType);
} }
} }
// all data in the checked range were null, so nothing is output // all data in the checked range were null, so nothing is output
SET_VAL(pCtx, notNullElems, 1); SET_VAL(pCtx, notNullElems, 1);
if (notNullElems > 0) { if (notNullElems > 0) {
GET_RES_INFO(pCtx)->hasResult = DATA_SET_FLAG; GET_RES_INFO(pCtx)->hasResult = DATA_SET_FLAG;
} }
...@@ -742,7 +742,7 @@ static void do_sum(SQLFunctionCtx *pCtx) { ...@@ -742,7 +742,7 @@ static void do_sum(SQLFunctionCtx *pCtx) {
static void sum_function(SQLFunctionCtx *pCtx) { static void sum_function(SQLFunctionCtx *pCtx) {
do_sum(pCtx); do_sum(pCtx);
// keep the result data in output buffer, not in the intermediate buffer // keep the result data in output buffer, not in the intermediate buffer
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
if (pResInfo->hasResult == DATA_SET_FLAG && pCtx->stableQuery) { if (pResInfo->hasResult == DATA_SET_FLAG && pCtx->stableQuery) {
...@@ -778,7 +778,7 @@ static void sum_func_merge(SQLFunctionCtx *pCtx) { ...@@ -778,7 +778,7 @@ static void sum_func_merge(SQLFunctionCtx *pCtx) {
SET_VAL(pCtx, notNullElems, 1); SET_VAL(pCtx, notNullElems, 1);
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
if (notNullElems > 0) { if (notNullElems > 0) {
pResInfo->hasResult = DATA_SET_FLAG; pResInfo->hasResult = DATA_SET_FLAG;
} }
...@@ -797,7 +797,7 @@ static int32_t firstFuncRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t c ...@@ -797,7 +797,7 @@ static int32_t firstFuncRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t c
if (pCtx->order == TSDB_ORDER_DESC) { if (pCtx->order == TSDB_ORDER_DESC) {
return BLK_DATA_NO_NEEDED; return BLK_DATA_NO_NEEDED;
} }
// no result for first query, data block is required // no result for first query, data block is required
if (GET_RES_INFO(pCtx) == NULL || GET_RES_INFO(pCtx)->numOfRes <= 0) { if (GET_RES_INFO(pCtx) == NULL || GET_RES_INFO(pCtx)->numOfRes <= 0) {
return BLK_DATA_ALL_NEEDED; return BLK_DATA_ALL_NEEDED;
...@@ -810,7 +810,7 @@ static int32_t lastFuncRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t co ...@@ -810,7 +810,7 @@ static int32_t lastFuncRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t co
if (pCtx->order != pCtx->param[0].i64) { if (pCtx->order != pCtx->param[0].i64) {
return BLK_DATA_NO_NEEDED; return BLK_DATA_NO_NEEDED;
} }
if (GET_RES_INFO(pCtx) == NULL || GET_RES_INFO(pCtx)->numOfRes <= 0) { if (GET_RES_INFO(pCtx) == NULL || GET_RES_INFO(pCtx)->numOfRes <= 0) {
return BLK_DATA_ALL_NEEDED; return BLK_DATA_ALL_NEEDED;
} else { } else {
...@@ -866,17 +866,17 @@ static int32_t lastDistFuncRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_ ...@@ -866,17 +866,17 @@ static int32_t lastDistFuncRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_
*/ */
static void avg_function(SQLFunctionCtx *pCtx) { static void avg_function(SQLFunctionCtx *pCtx) {
int32_t notNullElems = 0; int32_t notNullElems = 0;
// NOTE: keep the intermediate result into the interResultBuf // NOTE: keep the intermediate result into the interResultBuf
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
SAvgInfo *pAvgInfo = (SAvgInfo *)GET_ROWCELL_INTERBUF(pResInfo); SAvgInfo *pAvgInfo = (SAvgInfo *)GET_ROWCELL_INTERBUF(pResInfo);
double *pVal = &pAvgInfo->sum; double *pVal = &pAvgInfo->sum;
if (pCtx->preAggVals.isSet) { // Pre-aggregation if (pCtx->preAggVals.isSet) { // Pre-aggregation
notNullElems = pCtx->size - pCtx->preAggVals.statis.numOfNull; notNullElems = pCtx->size - pCtx->preAggVals.statis.numOfNull;
assert(notNullElems >= 0); assert(notNullElems >= 0);
if (IS_SIGNED_NUMERIC_TYPE(pCtx->inputType)) { if (IS_SIGNED_NUMERIC_TYPE(pCtx->inputType)) {
*pVal += pCtx->preAggVals.statis.sum; *pVal += pCtx->preAggVals.statis.sum;
} else if (IS_UNSIGNED_NUMERIC_TYPE(pCtx->inputType)) { } else if (IS_UNSIGNED_NUMERIC_TYPE(pCtx->inputType)) {
...@@ -886,7 +886,7 @@ static void avg_function(SQLFunctionCtx *pCtx) { ...@@ -886,7 +886,7 @@ static void avg_function(SQLFunctionCtx *pCtx) {
} }
} else { } else {
void *pData = GET_INPUT_DATA_LIST(pCtx); void *pData = GET_INPUT_DATA_LIST(pCtx);
if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) {
LIST_ADD_N(*pVal, pCtx, pData, int8_t, notNullElems, pCtx->inputType); LIST_ADD_N(*pVal, pCtx, pData, int8_t, notNullElems, pCtx->inputType);
} else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) { } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) {
...@@ -909,18 +909,18 @@ static void avg_function(SQLFunctionCtx *pCtx) { ...@@ -909,18 +909,18 @@ static void avg_function(SQLFunctionCtx *pCtx) {
LIST_ADD_N(*pVal, pCtx, pData, uint64_t, notNullElems, pCtx->inputType); LIST_ADD_N(*pVal, pCtx, pData, uint64_t, notNullElems, pCtx->inputType);
} }
} }
if (!pCtx->hasNull) { if (!pCtx->hasNull) {
assert(notNullElems == pCtx->size); assert(notNullElems == pCtx->size);
} }
SET_VAL(pCtx, notNullElems, 1); SET_VAL(pCtx, notNullElems, 1);
pAvgInfo->num += notNullElems; pAvgInfo->num += notNullElems;
if (notNullElems > 0) { if (notNullElems > 0) {
pResInfo->hasResult = DATA_SET_FLAG; pResInfo->hasResult = DATA_SET_FLAG;
} }
// keep the data into the final output buffer for super table query since this execution may be the last one // keep the data into the final output buffer for super table query since this execution may be the last one
if (pCtx->stableQuery) { if (pCtx->stableQuery) {
memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SAvgInfo)); memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SAvgInfo));
...@@ -929,18 +929,18 @@ static void avg_function(SQLFunctionCtx *pCtx) { ...@@ -929,18 +929,18 @@ static void avg_function(SQLFunctionCtx *pCtx) {
static void avg_func_merge(SQLFunctionCtx *pCtx) { static void avg_func_merge(SQLFunctionCtx *pCtx) {
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
double *sum = (double*) pCtx->pOutput; double *sum = (double*) pCtx->pOutput;
char *input = GET_INPUT_DATA_LIST(pCtx); char *input = GET_INPUT_DATA_LIST(pCtx);
for (int32_t i = 0; i < pCtx->size; ++i, input += pCtx->inputBytes) { for (int32_t i = 0; i < pCtx->size; ++i, input += pCtx->inputBytes) {
SAvgInfo *pInput = (SAvgInfo *)input; SAvgInfo *pInput = (SAvgInfo *)input;
if (pInput->num == 0) { // current input is null if (pInput->num == 0) { // current input is null
continue; continue;
} }
SET_DOUBLE_VAL(sum, *sum + pInput->sum); SET_DOUBLE_VAL(sum, *sum + pInput->sum);
// keep the number of data into the temp buffer // keep the number of data into the temp buffer
*(int64_t *)GET_ROWCELL_INTERBUF(pResInfo) += pInput->num; *(int64_t *)GET_ROWCELL_INTERBUF(pResInfo) += pInput->num;
} }
...@@ -951,10 +951,10 @@ static void avg_func_merge(SQLFunctionCtx *pCtx) { ...@@ -951,10 +951,10 @@ static void avg_func_merge(SQLFunctionCtx *pCtx) {
*/ */
static void avg_finalizer(SQLFunctionCtx *pCtx) { static void avg_finalizer(SQLFunctionCtx *pCtx) {
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
if (pCtx->currentStage == MERGE_STAGE) { if (pCtx->currentStage == MERGE_STAGE) {
assert(pCtx->inputType == TSDB_DATA_TYPE_BINARY); assert(pCtx->inputType == TSDB_DATA_TYPE_BINARY);
if (GET_INT64_VAL(GET_ROWCELL_INTERBUF(pResInfo)) <= 0) { if (GET_INT64_VAL(GET_ROWCELL_INTERBUF(pResInfo)) <= 0) {
setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes); setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes);
return; return;
...@@ -964,15 +964,15 @@ static void avg_finalizer(SQLFunctionCtx *pCtx) { ...@@ -964,15 +964,15 @@ static void avg_finalizer(SQLFunctionCtx *pCtx) {
} else { // this is the secondary merge, only in the secondary merge, the input type is TSDB_DATA_TYPE_BINARY } else { // this is the secondary merge, only in the secondary merge, the input type is TSDB_DATA_TYPE_BINARY
assert(IS_NUMERIC_TYPE(pCtx->inputType)); assert(IS_NUMERIC_TYPE(pCtx->inputType));
SAvgInfo *pAvgInfo = (SAvgInfo *)GET_ROWCELL_INTERBUF(pResInfo); SAvgInfo *pAvgInfo = (SAvgInfo *)GET_ROWCELL_INTERBUF(pResInfo);
if (pAvgInfo->num == 0) { // all data are NULL or empty table if (pAvgInfo->num == 0) { // all data are NULL or empty table
setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes); setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes);
return; return;
} }
SET_DOUBLE_VAL((double *)pCtx->pOutput, pAvgInfo->sum / pAvgInfo->num); SET_DOUBLE_VAL((double *)pCtx->pOutput, pAvgInfo->sum / pAvgInfo->num);
} }
// cannot set the numOfIteratedElems again since it is set during previous iteration // cannot set the numOfIteratedElems again since it is set during previous iteration
GET_RES_INFO(pCtx)->numOfRes = 1; GET_RES_INFO(pCtx)->numOfRes = 1;
doFinalizer(pCtx); doFinalizer(pCtx);
...@@ -992,7 +992,7 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin, ...@@ -992,7 +992,7 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin,
void* tval = NULL; void* tval = NULL;
int16_t index = 0; int16_t index = 0;
if (isMin) { if (isMin) {
tval = &pCtx->preAggVals.statis.min; tval = &pCtx->preAggVals.statis.min;
index = pCtx->preAggVals.statis.minIndex; index = pCtx->preAggVals.statis.minIndex;
...@@ -1000,7 +1000,7 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin, ...@@ -1000,7 +1000,7 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin,
tval = &pCtx->preAggVals.statis.max; tval = &pCtx->preAggVals.statis.max;
index = pCtx->preAggVals.statis.maxIndex; index = pCtx->preAggVals.statis.maxIndex;
} }
TSKEY key = TSKEY_INITIAL_VAL; TSKEY key = TSKEY_INITIAL_VAL;
if (pCtx->ptsList != NULL) { if (pCtx->ptsList != NULL) {
/** /**
...@@ -1016,23 +1016,23 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin, ...@@ -1016,23 +1016,23 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin,
// the index is the original position, not the relative position // the index is the original position, not the relative position
key = pCtx->ptsList[index]; key = pCtx->ptsList[index];
} }
if (IS_SIGNED_NUMERIC_TYPE(pCtx->inputType)) { if (IS_SIGNED_NUMERIC_TYPE(pCtx->inputType)) {
int64_t val = GET_INT64_VAL(tval); int64_t val = GET_INT64_VAL(tval);
if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) {
int8_t *data = (int8_t *)pOutput; int8_t *data = (int8_t *)pOutput;
UPDATE_DATA(pCtx, *data, (int8_t)val, notNullElems, isMin, key); UPDATE_DATA(pCtx, *data, (int8_t)val, notNullElems, isMin, key);
} else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) { } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) {
int16_t *data = (int16_t *)pOutput; int16_t *data = (int16_t *)pOutput;
UPDATE_DATA(pCtx, *data, (int16_t)val, notNullElems, isMin, key); UPDATE_DATA(pCtx, *data, (int16_t)val, notNullElems, isMin, key);
} else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) {
int32_t *data = (int32_t *)pOutput; int32_t *data = (int32_t *)pOutput;
#if defined(_DEBUG_VIEW) #if defined(_DEBUG_VIEW)
qDebug("max value updated according to pre-cal:%d", *data); qDebug("max value updated according to pre-cal:%d", *data);
#endif #endif
if ((*data < val) ^ isMin) { if ((*data < val) ^ isMin) {
*data = (int32_t)val; *data = (int32_t)val;
for (int32_t i = 0; i < (pCtx)->tagInfo.numOfTagCols; ++i) { for (int32_t i = 0; i < (pCtx)->tagInfo.numOfTagCols; ++i) {
...@@ -1041,7 +1041,7 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin, ...@@ -1041,7 +1041,7 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin,
__ctx->tag.i64 = key; __ctx->tag.i64 = key;
__ctx->tag.nType = TSDB_DATA_TYPE_BIGINT; __ctx->tag.nType = TSDB_DATA_TYPE_BIGINT;
} }
aAggs[TSDB_FUNC_TAG].xFunction(__ctx); aAggs[TSDB_FUNC_TAG].xFunction(__ctx);
} }
} }
...@@ -1073,18 +1073,18 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin, ...@@ -1073,18 +1073,18 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin,
} else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) {
float *data = (float *)pOutput; float *data = (float *)pOutput;
double val = GET_DOUBLE_VAL(tval); double val = GET_DOUBLE_VAL(tval);
UPDATE_DATA(pCtx, *data, (float)val, notNullElems, isMin, key); UPDATE_DATA(pCtx, *data, (float)val, notNullElems, isMin, key);
} }
return; return;
} }
void *p = GET_INPUT_DATA_LIST(pCtx); void *p = GET_INPUT_DATA_LIST(pCtx);
TSKEY *tsList = GET_TS_LIST(pCtx); TSKEY *tsList = GET_TS_LIST(pCtx);
*notNullElems = 0; *notNullElems = 0;
if (IS_SIGNED_NUMERIC_TYPE(pCtx->inputType)) { if (IS_SIGNED_NUMERIC_TYPE(pCtx->inputType)) {
if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) {
TYPED_LOOPCHECK_N(int8_t, pOutput, p, pCtx, pCtx->inputType, isMin, *notNullElems); TYPED_LOOPCHECK_N(int8_t, pOutput, p, pCtx, pCtx->inputType, isMin, *notNullElems);
...@@ -1093,12 +1093,12 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin, ...@@ -1093,12 +1093,12 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin,
} else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) {
int32_t *pData = p; int32_t *pData = p;
int32_t *retVal = (int32_t*) pOutput; int32_t *retVal = (int32_t*) pOutput;
for (int32_t i = 0; i < pCtx->size; ++i) { for (int32_t i = 0; i < pCtx->size; ++i) {
if (pCtx->hasNull && isNull((const char*)&pData[i], pCtx->inputType)) { if (pCtx->hasNull && isNull((const char*)&pData[i], pCtx->inputType)) {
continue; continue;
} }
if ((*retVal < pData[i]) ^ isMin) { if ((*retVal < pData[i]) ^ isMin) {
*retVal = pData[i]; *retVal = pData[i];
if(tsList) { if(tsList) {
...@@ -1135,9 +1135,9 @@ static bool min_func_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultInfo ...@@ -1135,9 +1135,9 @@ static bool min_func_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultInfo
if (!function_setup(pCtx, pResultInfo)) { if (!function_setup(pCtx, pResultInfo)) {
return false; // not initialized since it has been initialized return false; // not initialized since it has been initialized
} }
GET_TRUE_DATA_TYPE(); GET_TRUE_DATA_TYPE();
switch (type) { switch (type) {
case TSDB_DATA_TYPE_TINYINT: case TSDB_DATA_TYPE_TINYINT:
*((int8_t *)pCtx->pOutput) = INT8_MAX; *((int8_t *)pCtx->pOutput) = INT8_MAX;
...@@ -1180,9 +1180,9 @@ static bool max_func_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultInfo ...@@ -1180,9 +1180,9 @@ static bool max_func_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultInfo
if (!function_setup(pCtx, pResultInfo)) { if (!function_setup(pCtx, pResultInfo)) {
return false; // not initialized since it has been initialized return false; // not initialized since it has been initialized
} }
GET_TRUE_DATA_TYPE(); GET_TRUE_DATA_TYPE();
switch (type) { switch (type) {
case TSDB_DATA_TYPE_INT: case TSDB_DATA_TYPE_INT:
*((int32_t *)pCtx->pOutput) = INT32_MIN; *((int32_t *)pCtx->pOutput) = INT32_MIN;
...@@ -1217,7 +1217,7 @@ static bool max_func_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultInfo ...@@ -1217,7 +1217,7 @@ static bool max_func_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultInfo
default: default:
qError("illegal data type:%d in min/max query", pCtx->inputType); qError("illegal data type:%d in min/max query", pCtx->inputType);
} }
return true; return true;
} }
...@@ -1227,13 +1227,13 @@ static bool max_func_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultInfo ...@@ -1227,13 +1227,13 @@ static bool max_func_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultInfo
static void min_function(SQLFunctionCtx *pCtx) { static void min_function(SQLFunctionCtx *pCtx) {
int32_t notNullElems = 0; int32_t notNullElems = 0;
minMax_function(pCtx, pCtx->pOutput, 1, &notNullElems); minMax_function(pCtx, pCtx->pOutput, 1, &notNullElems);
SET_VAL(pCtx, notNullElems, 1); SET_VAL(pCtx, notNullElems, 1);
if (notNullElems > 0) { if (notNullElems > 0) {
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
pResInfo->hasResult = DATA_SET_FLAG; pResInfo->hasResult = DATA_SET_FLAG;
// set the flag for super table query // set the flag for super table query
if (pCtx->stableQuery) { if (pCtx->stableQuery) {
*(pCtx->pOutput + pCtx->inputBytes) = DATA_SET_FLAG; *(pCtx->pOutput + pCtx->inputBytes) = DATA_SET_FLAG;
...@@ -1244,13 +1244,13 @@ static void min_function(SQLFunctionCtx *pCtx) { ...@@ -1244,13 +1244,13 @@ static void min_function(SQLFunctionCtx *pCtx) {
static void max_function(SQLFunctionCtx *pCtx) { static void max_function(SQLFunctionCtx *pCtx) {
int32_t notNullElems = 0; int32_t notNullElems = 0;
minMax_function(pCtx, pCtx->pOutput, 0, &notNullElems); minMax_function(pCtx, pCtx->pOutput, 0, &notNullElems);
SET_VAL(pCtx, notNullElems, 1); SET_VAL(pCtx, notNullElems, 1);
if (notNullElems > 0) { if (notNullElems > 0) {
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
pResInfo->hasResult = DATA_SET_FLAG; pResInfo->hasResult = DATA_SET_FLAG;
// set the flag for super table query // set the flag for super table query
if (pCtx->stableQuery) { if (pCtx->stableQuery) {
*(pCtx->pOutput + pCtx->inputBytes) = DATA_SET_FLAG; *(pCtx->pOutput + pCtx->inputBytes) = DATA_SET_FLAG;
...@@ -1260,16 +1260,16 @@ static void max_function(SQLFunctionCtx *pCtx) { ...@@ -1260,16 +1260,16 @@ static void max_function(SQLFunctionCtx *pCtx) {
static int32_t minmax_merge_impl(SQLFunctionCtx *pCtx, int32_t bytes, char *output, bool isMin) { static int32_t minmax_merge_impl(SQLFunctionCtx *pCtx, int32_t bytes, char *output, bool isMin) {
int32_t notNullElems = 0; int32_t notNullElems = 0;
GET_TRUE_DATA_TYPE(); GET_TRUE_DATA_TYPE();
assert(pCtx->stableQuery); assert(pCtx->stableQuery);
for (int32_t i = 0; i < pCtx->size; ++i) { for (int32_t i = 0; i < pCtx->size; ++i) {
char *input = GET_INPUT_DATA(pCtx, i); char *input = GET_INPUT_DATA(pCtx, i);
if (input[bytes] != DATA_SET_FLAG) { if (input[bytes] != DATA_SET_FLAG) {
continue; continue;
} }
switch (type) { switch (type) {
case TSDB_DATA_TYPE_TINYINT: { case TSDB_DATA_TYPE_TINYINT: {
int8_t v = GET_INT8_VAL(input); int8_t v = GET_INT8_VAL(input);
...@@ -1285,12 +1285,12 @@ static int32_t minmax_merge_impl(SQLFunctionCtx *pCtx, int32_t bytes, char *outp ...@@ -1285,12 +1285,12 @@ static int32_t minmax_merge_impl(SQLFunctionCtx *pCtx, int32_t bytes, char *outp
int32_t v = GET_INT32_VAL(input); int32_t v = GET_INT32_VAL(input);
if ((*(int32_t *)output < v) ^ isMin) { if ((*(int32_t *)output < v) ^ isMin) {
*(int32_t *)output = v; *(int32_t *)output = v;
for (int32_t j = 0; j < pCtx->tagInfo.numOfTagCols; ++j) { for (int32_t j = 0; j < pCtx->tagInfo.numOfTagCols; ++j) {
SQLFunctionCtx *__ctx = pCtx->tagInfo.pTagCtxList[j]; SQLFunctionCtx *__ctx = pCtx->tagInfo.pTagCtxList[j];
aAggs[TSDB_FUNC_TAG].xFunction(__ctx); aAggs[TSDB_FUNC_TAG].xFunction(__ctx);
} }
notNullElems++; notNullElems++;
} }
break; break;
...@@ -1339,15 +1339,15 @@ static int32_t minmax_merge_impl(SQLFunctionCtx *pCtx, int32_t bytes, char *outp ...@@ -1339,15 +1339,15 @@ static int32_t minmax_merge_impl(SQLFunctionCtx *pCtx, int32_t bytes, char *outp
break; break;
} }
} }
return notNullElems; return notNullElems;
} }
static void min_func_merge(SQLFunctionCtx *pCtx) {
  int32_t notNullElems = minmax_merge_impl(pCtx, pCtx->outputBytes, pCtx->pOutput, 1);

  SET_VAL(pCtx, notNullElems, 1);

  SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
  if (notNullElems > 0) {
    pResInfo->hasResult = DATA_SET_FLAG;
@@ -1356,9 +1356,9 @@ static void min_func_merge(SQLFunctionCtx *pCtx) {
static void max_func_merge(SQLFunctionCtx *pCtx) {
  int32_t numOfElem = minmax_merge_impl(pCtx, pCtx->outputBytes, pCtx->pOutput, 0);

  SET_VAL(pCtx, numOfElem, 1);

  SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
  if (numOfElem > 0) {
    pResInfo->hasResult = DATA_SET_FLAG;
@@ -4870,7 +4870,8 @@ static void elapsedFinalizer(SQLFunctionCtx *pCtx) {
  }

  SElapsedInfo *pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
- *(double *)pCtx->pOutput = (double)pInfo->max - (double)pInfo->min;
+ double result = (double)pInfo->max - (double)pInfo->min;
+ *(double *)pCtx->pOutput = result >= 0 ? result : -result;
  if (pCtx->numOfParams > 0 && pCtx->param[0].i64 > 0) {
    *(double *)pCtx->pOutput = *(double *)pCtx->pOutput / pCtx->param[0].i64;
  }
......
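The elapsedFinalizer change guards the subtraction with an absolute value, so a scan that records min and max in swapped order (as a descending scan can) still yields a non-negative duration; param[0], when positive, rescales the result to the requested time unit. A small illustrative rendering of that arithmetic, with names that are not the engine's:

#include <stdio.h>

/* elapsed = |max - min|, optionally rescaled by a positive time unit */
static double elapsedValue(double minTs, double maxTs, long long unit) {
  double result = maxTs - minTs;
  if (result < 0) result = -result;   /* same guard the patch introduces */
  return (unit > 0) ? result / unit : result;
}

int main(void) {
  printf("%.0f\n", elapsedValue(1000, 61000, 0));      /* 60000 */
  printf("%.0f\n", elapsedValue(61000, 1000, 10000));  /* 6, despite swapped inputs */
  return 0;
}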
@@ -1942,7 +1942,7 @@ static SQLFunctionCtx* createSQLFunctionCtx(SQueryRuntimeEnv* pRuntimeEnv, SExpr
  for (int32_t j = 0; j < pCtx->numOfParams; ++j) {
    int16_t type  = pSqlExpr->param[j].nType;
    int16_t bytes = pSqlExpr->param[j].nLen;

-   if (pSqlExpr->functionId == TSDB_FUNC_STDDEV_DST) {
+   if (pSqlExpr->functionId == TSDB_FUNC_STDDEV_DST || pSqlExpr->functionId == TSDB_FUNC_TS_COMP) {
      continue;
    }
......
@@ -1040,7 +1040,10 @@ void tColModelDisplay(SColumnModel *pModel, void *pData, int32_t numOfRows, int3
        break;
      case TSDB_DATA_TYPE_NCHAR: {
        char buf[4096] = {0};
-       taosUcs4ToMbs(val, pModel->pFields[j].field.bytes, buf);
+       int32_t len = taosUcs4ToMbs(val, pModel->pFields[j].field.bytes, buf);
+       if (len < 0){
+         qError("castConvert1 taosUcs4ToMbs error");
+       }
        printf("%s\t", buf);
        break;
      }
@@ -1092,7 +1095,10 @@ void tColModelDisplayEx(SColumnModel *pModel, void *pData, int32_t numOfRows, in
        break;
      case TSDB_DATA_TYPE_NCHAR: {
        char buf[128] = {0};
-       taosUcs4ToMbs(val, pModel->pFields[j].field.bytes, buf);
+       int32_t len = taosUcs4ToMbs(val, pModel->pFields[j].field.bytes, buf);
+       if (len < 0){
+         qError("castConvert1 taosUcs4ToMbs error");
+       }
        printf("%s\t", buf);
        break;
      }
......
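Both display fixes follow the same pattern: capture the return value of taosUcs4ToMbs, which reports the converted byte count or a negative value on charset failure, log the failure, and fall through with the zeroed buffer so nothing garbled is printed. A reduced sketch of that shape, where ucs4ToMbs is a stand-in for the real conversion routine:

#include <stdio.h>

static int ucs4ToMbs(const void *src, int len, char *dst) {
  (void)src; (void)len; (void)dst;
  return -1;  /* pretend the charset conversion failed */
}

static void displayNchar(const void *val, int bytes) {
  char buf[4096] = {0};
  int len = ucs4ToMbs(val, bytes, buf);
  if (len < 0) {
    fprintf(stderr, "taosUcs4ToMbs error\n");  /* log and fall through */
  }
  printf("%s\t", buf);  /* buf stays zeroed, so this prints empty */
}

int main(void) { displayNchar("x", 1); return 0; }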
@@ -1899,12 +1899,20 @@ int32_t filterInitValFieldData(SFilterInfo *info) {
      (unit->compare.optr == TSDB_RELATION_MATCH || unit->compare.optr == TSDB_RELATION_NMATCH)){
      char newValData[TSDB_REGEX_STRING_DEFAULT_LEN * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE] = {0};
      int32_t len = taosUcs4ToMbs(varDataVal(fi->data), varDataLen(fi->data), varDataVal(newValData));
+     if (len < 0){
+       qError("filterInitValFieldData taosUcs4ToMbs error 1");
+       return TSDB_CODE_FAILED;
+     }
      varDataSetLen(newValData, len);
      varDataCopy(fi->data, newValData);
    }else if(type == TSDB_DATA_TYPE_JSON &&
      (unit->compare.optr == TSDB_RELATION_MATCH || unit->compare.optr == TSDB_RELATION_NMATCH)){
      char newValData[TSDB_REGEX_STRING_DEFAULT_LEN * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE] = {0};
      int32_t len = taosUcs4ToMbs(((tVariant*)(fi->desc))->pz, ((tVariant*)(fi->desc))->nLen, newValData);
+     if (len < 0){
+       qError("filterInitValFieldData taosUcs4ToMbs error 2");
+       return TSDB_CODE_FAILED;
+     }
      memcpy(((tVariant*)(fi->desc))->pz, newValData, len);
      ((tVariant*)(fi->desc))->nLen = len;
    }
@@ -3025,6 +3033,11 @@ static void doJsonCompare(SFilterComUnit *cunit, int8_t *result, void* colData){
  }else{
    char *newColData = calloc(cunit->dataSize * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE, 1);
    int len = taosUcs4ToMbs(varDataVal(realData), varDataLen(realData), varDataVal(newColData));
+   if (len < 0){
+     qError("castConvert1 taosUcs4ToMbs error");
+     tfree(newColData);
+     return;
+   }
    varDataSetLen(newColData, len);
    tVariant* val = cunit->valData;
    char newValData[TSDB_REGEX_STRING_DEFAULT_LEN * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE] = {0};
@@ -3113,9 +3126,13 @@ bool filterExecuteImplMisc(void *pinfo, int32_t numOfRows, int8_t** p, SDataStat
    if(info->cunits[uidx].dataType == TSDB_DATA_TYPE_NCHAR && (info->cunits[uidx].optr == TSDB_RELATION_MATCH || info->cunits[uidx].optr == TSDB_RELATION_NMATCH)){
      char *newColData = calloc(info->cunits[uidx].dataSize * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE, 1);
-     int len = taosUcs4ToMbs(varDataVal(colData), varDataLen(colData), varDataVal(newColData));
-     varDataSetLen(newColData, len);
-     (*p)[i] = filterDoCompare(gDataCompare[info->cunits[uidx].func], info->cunits[uidx].optr, newColData, info->cunits[uidx].valData);
+     int32_t len = taosUcs4ToMbs(varDataVal(colData), varDataLen(colData), varDataVal(newColData));
+     if (len < 0){
+       qError("castConvert1 taosUcs4ToMbs error");
+     }else{
+       varDataSetLen(newColData, len);
+       (*p)[i] = filterDoCompare(gDataCompare[info->cunits[uidx].func], info->cunits[uidx].optr, newColData, info->cunits[uidx].valData);
+     }
      tfree(newColData);
    }else if(info->cunits[uidx].dataType == TSDB_DATA_TYPE_JSON){
      doJsonCompare(&(info->cunits[uidx]), &(*p)[i], colData);
@@ -3170,9 +3187,13 @@ bool filterExecuteImpl(void *pinfo, int32_t numOfRows, int8_t** p, SDataStatis *
    } else {
      if(cunit->dataType == TSDB_DATA_TYPE_NCHAR && (cunit->optr == TSDB_RELATION_MATCH || cunit->optr == TSDB_RELATION_NMATCH)){
        char *newColData = calloc(cunit->dataSize * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE, 1);
-       int len = taosUcs4ToMbs(varDataVal(colData), varDataLen(colData), varDataVal(newColData));
-       varDataSetLen(newColData, len);
-       (*p)[i] = filterDoCompare(gDataCompare[cunit->func], cunit->optr, newColData, cunit->valData);
+       int32_t len = taosUcs4ToMbs(varDataVal(colData), varDataLen(colData), varDataVal(newColData));
+       if (len < 0){
+         qError("castConvert1 taosUcs4ToMbs error");
+       }else{
+         varDataSetLen(newColData, len);
+         (*p)[i] = filterDoCompare(gDataCompare[cunit->func], cunit->optr, newColData, cunit->valData);
+       }
        tfree(newColData);
      }else if(cunit->dataType == TSDB_DATA_TYPE_JSON){
        doJsonCompare(cunit, &(*p)[i], colData);
@@ -3577,7 +3598,11 @@ int32_t filterConverNcharColumns(SFilterInfo* info, int32_t rows, bool *gotNchar
      char *src = FILTER_GET_COL_FIELD_DATA(fi, j);
      char *dst = FILTER_GET_COL_FIELD_DATA(&nfi, j);
      int32_t len = 0;
-     taosMbsToUcs4(varDataVal(src), varDataLen(src), varDataVal(dst), bufSize, &len);
+     bool ret = taosMbsToUcs4(varDataVal(src), varDataLen(src), varDataVal(dst), bufSize, &len);
+     if(!ret) {
+       qError("filterConverNcharColumns taosMbsToUcs4 error");
+       return TSDB_CODE_FAILED;
+     }
      varDataLen(dst) = len;
    }
......
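The filter changes above all apply one cleanup discipline: when the UCS-4 to multibyte conversion fails, skip the comparison, but still free the scratch buffer on every path. A minimal sketch of that shape, in which convert and compare are stand-ins for taosUcs4ToMbs and filterDoCompare:

#include <stdio.h>
#include <stdlib.h>

static int convert(const char *in, char *out) { (void)in; (void)out; return -1; }
static int compare(const char *a) { (void)a; return 1; }

static int filterOneRow(const char *colData) {
  int match = 0;
  char *scratch = calloc(256, 1);
  if (scratch == NULL) return 0;
  int len = convert(colData, scratch);
  if (len < 0) {
    fprintf(stderr, "conversion error, row filtered out\n");
  } else {
    match = compare(scratch);  /* only compare successfully converted data */
  }
  free(scratch);               /* freed on both branches */
  return match;
}

int main(void) { printf("%d\n", filterOneRow("abc")); return 0; }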
@@ -183,7 +183,7 @@ bool likeOperator(SColumnFilterElem *pFilter, const char *minval, const char *ma
    return patternMatch((char *)pFilter->filterInfo.pz, varDataVal(minval), varDataLen(minval), &info) == TSDB_PATTERN_MATCH;
  } else if (type == TSDB_DATA_TYPE_NCHAR) {
    SPatternCompareInfo info = PATTERN_COMPARE_INFO_INITIALIZER;
-   return WCSPatternMatch((wchar_t*)pFilter->filterInfo.pz, varDataVal(minval), varDataLen(minval)/TSDB_NCHAR_SIZE, &info) == TSDB_PATTERN_MATCH;
+   return WCSPatternMatch((uint32_t *) pFilter->filterInfo.pz, (uint32_t *) varDataVal(minval), varDataLen(minval)/TSDB_NCHAR_SIZE, &info) == TSDB_PATTERN_MATCH;
  } else {
    return false;
  }
......
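The motivation for swapping wchar_t for uint32_t here: NCHAR data is stored as fixed-width 4-byte UCS-4 (hence the division by TSDB_NCHAR_SIZE), while sizeof(wchar_t) is platform-dependent, typically 2 bytes on Windows and 4 on Linux, so matching through wchar_t reads the wrong width on some platforms. A small demonstration of treating such a buffer as uint32_t code points:

#include <stdint.h>
#include <stdio.h>
#include <wchar.h>

int main(void) {
  /* a 3-code-point UCS-4 string, NUL-terminated */
  uint32_t ucs4[] = {0x4e2d, 0x6587, L'!', 0};
  printf("sizeof(wchar_t) = %zu, UCS-4 unit = %zu\n",
         sizeof(wchar_t), sizeof(uint32_t));
  for (size_t i = 0; ucs4[i] != 0; ++i) {
    printf("U+%04X ", (unsigned)ucs4[i]);  /* U+4E2D U+6587 U+0021 */
  }
  printf("\n");
  return 0;
}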
@@ -1463,6 +1463,7 @@ static void *tsdbDecodeTable(void *buf, STable **pRTable) {
      tsdbFreeTable(pTable);
      return NULL;
    }
+   taosHashSetFreeFp(pTable->jsonKeyMap, taosArrayDestroyForHash);
  }else{
    pTable->pIndex = tSkipListCreate(TSDB_SUPER_TABLE_SL_LEVEL, colType(pCol), (uint8_t)(colBytes(pCol)), NULL,
                                     SL_ALLOW_DUP_KEY, getTagIndexKey);
......
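This one-line tsdbDecodeTable fix registers a value destructor on the decoded table's jsonKeyMap, so that destroying the hash also releases the arrays its values own. A minimal stand-in container showing why the registration matters; MiniMap and its helpers are illustrative, not the TDengine hash API:

#include <stdio.h>
#include <stdlib.h>

typedef void (*FreeFp)(void *);

typedef struct { void **vals; int n; FreeFp freeFp; } MiniMap;

static void miniMapSetFreeFp(MiniMap *m, FreeFp fp) { m->freeFp = fp; }

static void miniMapDestroy(MiniMap *m) {
  for (int i = 0; i < m->n; ++i) {
    if (m->freeFp) m->freeFp(m->vals[i]);  /* release value-owned memory */
  }
  free(m->vals);
}

static void freeArray(void *p) { free(p); printf("value freed\n"); }

int main(void) {
  MiniMap m = {.vals = malloc(sizeof(void *)), .n = 1, .freeFp = NULL};
  m.vals[0] = malloc(16);
  miniMapSetFreeFp(&m, freeArray);  /* without this, the 16 bytes would leak */
  miniMapDestroy(&m);
  return 0;
}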
@@ -4243,20 +4243,28 @@ char* parseTagDatatoJson(void *p){
      }
      cJSON_AddItemToObject(json, tagJsonKey, value);
    }else if(type == TSDB_DATA_TYPE_NCHAR) {
-     char *tagJsonValue = calloc(varDataLen(realData), 1);
-     int32_t length = taosUcs4ToMbs(varDataVal(realData), varDataLen(realData), tagJsonValue);
-     if (length < 0) {
-       tsdbError("charset:%s to %s. val:%s convert json value failed.", DEFAULT_UNICODE_ENCODEC, tsCharset,
-                 (char*)val);
-       free(tagJsonValue);
-       goto end;
-     }
-     cJSON* value = cJSON_CreateString(tagJsonValue);
-     free(tagJsonValue);
-     if (value == NULL)
-     {
-       goto end;
-     }
+     cJSON* value = NULL;
+     if (varDataLen(realData) > 0){
+       char *tagJsonValue = calloc(varDataLen(realData), 1);
+       int32_t length = taosUcs4ToMbs(varDataVal(realData), varDataLen(realData), tagJsonValue);
+       if (length < 0) {
+         tsdbError("charset:%s to %s. val:%s convert json value failed.", DEFAULT_UNICODE_ENCODEC, tsCharset,
+                   (char*)val);
+         free(tagJsonValue);
+         goto end;
+       }
+       value = cJSON_CreateString(tagJsonValue);
+       free(tagJsonValue);
+       if (value == NULL)
+       {
+         goto end;
+       }
+     }else if(varDataLen(realData) == 0){
+       value = cJSON_CreateString("");
+     }else{
+       assert(0);
+     }
      cJSON_AddItemToObject(json, tagJsonKey, value);
    }else if(type == TSDB_DATA_TYPE_DOUBLE){
      double jsonVd = *(double*)(realData);
......
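The parseTagDatatoJson change branches on the tag's length first: a zero-length NCHAR tag becomes an empty JSON string instead of being pushed through calloc and charset conversion, and a negative length asserts as corrupt input. A reduced sketch of that branch structure, with jsonString and ucs4ToMbs as stand-ins for cJSON_CreateString and taosUcs4ToMbs:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *jsonString(const char *s) {
  char *v = malloc(strlen(s) + 1);
  if (v) strcpy(v, s);
  return v;
}

static int ucs4ToMbs(const void *src, int len, char *dst) {
  memcpy(dst, src, (size_t)len);  /* pretend conversion succeeds */
  return len;
}

static char *tagToJsonValue(const void *data, int len) {
  if (len > 0) {
    char *buf = calloc((size_t)len + 1, 1);
    if (buf == NULL) return NULL;
    if (ucs4ToMbs(data, len, buf) < 0) { free(buf); return NULL; }
    char *v = jsonString(buf);
    free(buf);
    return v;
  } else if (len == 0) {
    return jsonString("");  /* the new branch: empty value, still valid JSON */
  }
  return NULL;              /* negative length: corrupt input */
}

int main(void) {
  char *v = tagToJsonValue("", 0);
  printf("[%s]\n", v ? v : "null");  /* prints [] for the empty tag */
  free(v);
  return 0;
}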
@@ -44,7 +44,7 @@ typedef struct SPatternCompareInfo {
int patternMatch(const char *pattern, const char *str, size_t size, const SPatternCompareInfo *pInfo);
-int WCSPatternMatch(const wchar_t *pattern, const wchar_t *str, size_t size, const SPatternCompareInfo *pInfo);
+int WCSPatternMatch(const uint32_t *pattern, const uint32_t *str, size_t size, const SPatternCompareInfo *pInfo);
int32_t doCompare(const char* a, const char* b, int32_t type, size_t size);
......
@@ -321,29 +321,94 @@ int patternMatch(const char *patterStr, const char *str, size_t size, const SPat
  return (str[j] == 0 || j >= size) ? TSDB_PATTERN_MATCH : TSDB_PATTERN_NOMATCH;
}

-int WCSPatternMatch(const wchar_t *patterStr, const wchar_t *str, size_t size, const SPatternCompareInfo *pInfo) {
-  wchar_t c, c1;
-  wchar_t matchOne = L'_'; // "_"
-  wchar_t matchAll = L'%'; // "%"
+// fixed-width UCS-4 replacement for wcschr(), unrolled in the glibc style;
+// wchar_t is not 4 bytes wide on every platform, so uint32_t is used instead
+static uint32_t *taosWcschr(const uint32_t *wcs, const uint32_t wc) {
+  const uint32_t *wcs2 = wcs + 1;
+
+  if (*wcs == wc)
+    return (uint32_t *) wcs;
+  if (*wcs == L'\0')
+    return NULL;
+
+  do {
+    wcs += 2;
+
+    if (*wcs2 == wc)
+      return (uint32_t *) wcs2;
+    if (*wcs2 == L'\0')
+      return NULL;
+    wcs2 += 2;
+
+    if (*wcs == wc)
+      return (uint32_t *) wcs;
+    if (*wcs == L'\0')
+      return NULL;
+    wcs += 2;
+
+    if (*wcs2 == wc)
+      return (uint32_t *) wcs2;
+    if (*wcs2 == L'\0')
+      return NULL;
+    wcs2 += 2;
+
+    if (*wcs == wc)
+      return (uint32_t *) wcs;
+    if (*wcs == L'\0')
+      return NULL;
+    wcs += 2;
+
+    if (*wcs2 == wc)
+      return (uint32_t *) wcs2;
+    if (*wcs2 == L'\0')
+      return NULL;
+    wcs2 += 2;
+
+    if (*wcs == wc)
+      return (uint32_t *) wcs;
+    if (*wcs == L'\0')
+      return NULL;
+    wcs += 2;
+
+    if (*wcs2 == wc)
+      return (uint32_t *) wcs2;
+    if (*wcs2 == L'\0')
+      return NULL;
+    wcs2 += 2;
+
+    if (*wcs == wc)
+      return (uint32_t *) wcs;
+  } while (*wcs != L'\0');
+
+  return NULL;
+}
+
+// fixed-width UCS-4 replacement for wcscspn()
+static size_t taosWcscspn(const uint32_t *wcs, const uint32_t *reject) {
+  size_t count = 0;
+
+  while (*wcs != L'\0') {
+    if (taosWcschr(reject, *wcs++) == NULL)
+      ++count;
+    else
+      return count;
+  }
+
+  return count;
+}
+
+int WCSPatternMatch(const uint32_t *patterStr, const uint32_t *str, size_t size, const SPatternCompareInfo *pInfo) {
+  uint32_t c, c1;
+  uint32_t matchOne = (uint32_t) L'_'; // "_"
+  uint32_t matchAll = (uint32_t) L'%'; // "%"

  int32_t i = 0;
  int32_t j = 0;

  while ((c = patterStr[i++]) != 0) {
    if (c == matchAll) { /* Match "%" */
      while ((c = patterStr[i++]) == matchAll || c == matchOne) {
        if (c == matchOne && (j >= size || str[j++] == 0)) {
          return TSDB_PATTERN_NOWILDCARDMATCH;
        }
      }

      if (c == 0) {
        return TSDB_PATTERN_MATCH;
      }

-     wchar_t accept[3] = {towupper(c), towlower(c), 0};
+     uint32_t accept[3] = {towupper(c), towlower(c), 0};
      while (1) {
-       size_t n = wcscspn(str, accept);
+       size_t n = taosWcscspn(str, accept);

        str += n;
        if (str[0] == 0 || (n >= size)) {
@@ -465,7 +530,7 @@ int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) {
  memcpy(pattern, varDataVal(pRight), varDataLen(pRight));
  memcpy(str, varDataVal(pLeft), size * sizeof(wchar_t));

- int32_t ret = WCSPatternMatch(pattern, str, size, &pInfo);
+ int32_t ret = WCSPatternMatch((uint32_t *)pattern, (uint32_t *)str, size, &pInfo);

  free(pattern);
  free(str);
......
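For a compact reference of the '_'/'%' semantics that WCSPatternMatch implements over NUL-terminated uint32_t strings, here is a simplified recursive matcher; it is a teaching stand-in, not the library routine, which additionally folds case via towupper/towlower and enforces the size cap:

#include <stdint.h>
#include <stdio.h>

static int ucs4Match(const uint32_t *pat, const uint32_t *str) {
  for (;;) {
    if (*pat == 0) return *str == 0;
    if (*pat == (uint32_t)'%') {
      for (const uint32_t *s = str;; ++s) {  /* '%' tries every suffix */
        if (ucs4Match(pat + 1, s)) return 1;
        if (*s == 0) return 0;
      }
    }
    if (*str == 0) return 0;
    if (*pat != (uint32_t)'_' && *pat != *str) return 0;  /* '_' = any one */
    ++pat; ++str;
  }
}

int main(void) {
  uint32_t pat[] = {'t', '%', '_', 'g', 0};       /* like the pattern 't%_g' */
  uint32_t yes[] = {'t', 'd', 'l', 'o', 'g', 0};
  uint32_t no[]  = {'t', 'g', 0};
  printf("%d %d\n", ucs4Match(pat, yes), ucs4Match(pat, no));  /* 1 0 */
  return 0;
}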
@@ -282,7 +282,6 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TDB_MESSED_MSG, "TSDB messed message")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_IVLD_TAG_VAL, "TSDB invalid tag value")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_CACHE_LAST_ROW, "TSDB no cache last row data")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INCOMPLETE_DFILESET, "Incomplete DFileSet")
-TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_JSON_TAG_KEY, "TSDB no tag json key")

// query
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_INVALID_QHANDLE, "Invalid handle")
......
@@ -48,10 +48,7 @@ class TDTestCase:
    def caseDescription(self):
        '''
-       case1 <wenzhouwww>: [TD-11389] :
-       this test case is an test case for cache error , it will let the cached data obtained by the client that has connected to taosd incorrect,
-       root cause : table schema is changed, tag hostname size is increased through schema-less insertion. The schema cache of client taos is not refreshed.
+       case1 <shenglian zhou>: [TD-12344] : fix session window for super table two stage query
        '''
        return
@@ -97,9 +94,8 @@ class TDTestCase:
        cfg_path = self.getcfgPath()
        print(cfg_path)
-       # tdSql.execute('select elapsed(ts,10s) from st where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" session(ts,1d) group by tbname;') # session not support super table
-       os.system("taos -c %s -s 'select elapsed(ts,10s) from testdb.st where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" session(ts,1d) group by tbname;' " % (cfg_path))
+       tdSql.query('select elapsed(ts,10s) from st where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" session(ts,1d) group by tbname;') # session window now supported on super tables
+       tdSql.checkRows(10)
......
###################################################################
# Copyright (c) 2021 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
from util.log import *
from util.cases import *
from util.sql import *
class TDTestCase:
def caseDescription(self):
'''
case1<shenglian zhou>: [TD-12229]fix union all query produces different result when switch query
'''
return
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self._conn = conn
def run(self):
print("running {}".format(__file__))
tdSql.execute("drop database if exists td12229")
tdSql.execute("create database if not exists td12229")
tdSql.execute('use td12229')
tdSql.execute('create stable st(ts timestamp , value int ) tags (ind int)')
tdSql.execute('insert into tb1 using st tags(1) values(now ,1)')
tdSql.execute('insert into tb1 using st tags(1) values(now+1s ,2)')
tdSql.execute('insert into tb1 using st tags(1) values(now+2s ,3)')
tdSql.execute('create stable ste(ts timestamp , value int ) tags (ind int)')
tdSql.query('select * from st')
tdSql.checkRows(3)
tdSql.query('select * from st union all select * from ste')
tdSql.checkRows(3)
tdSql.query('select * from ste union all select * from st')
tdSql.checkRows(3)
tdSql.query('select elapsed(ts) from ste group by tbname union all select elapsed(ts) from st group by tbname;')
tdSql.checkRows(1)
tdSql.query('select elapsed(ts) from st group by tbname union all select elapsed(ts) from ste group by tbname;')
tdSql.checkRows(1)
tdSql.execute('drop database td12229')
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
#!/bin/bash
function stopTaosd {
echo "Stop taosd"
sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail'
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
}
stopTaosd
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
nohup taosd -c /etc/taos/ > /dev/null 2>&1 &
sleep 10
cd ../../
WKC=`pwd`
cd ${WKC}/src/connector/C#
dotnet test
dotnet run --project src/test/Cases/Cases.csproj
cd ${WKC}/tests/examples/C#
dotnet run --project C#checker/C#checker.csproj
dotnet run --project TDengineTest/TDengineTest.csproj
dotnet run --project schemaless/schemaless.csproj
cd ${WKC}/tests/examples/C#/taosdemo
dotnet build -c Release
tree | true
./bin/Release/net5.0/taosdemo -c /etc/taos -y
#!/bin/bash
function stopTaosd {
echo "Stop taosd"
sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail'
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
}
stopTaosd
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
nohup taosd -c /etc/taos/ > /dev/null 2>&1 &
sleep 10
cd ../../
WKC=`pwd`
#!/bin/bash
function stopTaosd {
echo "Stop taosd"
sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail'
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
}
stopTaosd
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
nohup taosd -c /etc/taos/ > /dev/null 2>&1 &
sleep 10
#!/bin/bash
function stopTaosd {
echo "Stop taosd"
sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail'
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
}
stopTaosd
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
nohup taosd -c /etc/taos/ > /dev/null 2>&1 &
sleep 10
cd ../../
WKC=`pwd`
cd ${WKC}/src/connector/nodejs
npm install
npm run test
cd ${WKC}/tests/examples/nodejs
npm install td2.0-connector > /dev/null 2>&1
node nodejsChecker.js host=localhost
node test1970.js
cd ${WKC}/tests/connectorTest/nodejsTest/nanosupport
npm install td2.0-connector > /dev/null 2>&1
node nanosecondTest.js
#!/bin/bash
function stopTaosd {
echo "Stop taosd"
sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail'
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
}
stopTaosd
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
nohup taosd -c /etc/taos/ > /dev/null 2>&1 &
sleep 10
cd ../../src/connector/python
pip3 install pytest
pytest tests/
python3 examples/bind-multi.py
python3 examples/bind-row.py
python3 examples/demo.py
python3 examples/insert-lines.py
python3 examples/pep-249.py
python3 examples/query-async.py
python3 examples/query-objectively.py
python3 examples/subscribe-sync.py
python3 examples/subscribe-async.py
#!/bin/bash
function stopTaosd {
echo "Stop taosd"
sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail'
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
}
stopTaosd
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
nohup taosd -c /etc/taos/ > /dev/null 2>&1 &
sleep 10
cd ../../
WKC=`pwd`
#!/bin/bash
function stopTaosd {
echo "Stop taosd"
sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail'
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
}
stopTaosd
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
nohup taosd -c /etc/taos/ > /dev/null 2>&1 &
sleep 10
cd ../../
WKC=`pwd`
\ No newline at end of file
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
import subprocess
class TDTestCase:
def caseDescription(self):
'''
case1<sdsang>: [TD-12362] taosdump supports JSON
'''
return
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.tmpdir = "tmp"
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
buildPath = ""
for root, dirs, files in os.walk(projPath):
if ("taosdump" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def run(self):
tdSql.prepare()
tdSql.execute("drop database if exists db")
tdSql.execute("create database db days 11 keep 3649 blocks 8 ")
tdSql.execute("use db")
tdSql.execute(
"create table st(ts timestamp, c1 int) tags(jtag JSON)")
tdSql.execute("create table t1 using st tags('{\"location\": \"beijing\"}')")
tdSql.execute("insert into t1 values(1500000000000, 1)")
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosdump not found!")
else:
tdLog.info("taosdump found in %s" % buildPath)
binPath = buildPath + "/build/bin/"
if not os.path.exists(self.tmpdir):
os.makedirs(self.tmpdir)
else:
print("directory exists")
os.system("rm -rf %s" % self.tmpdir)
os.makedirs(self.tmpdir)
os.system("%staosdump --databases db -o %s" % (binPath, self.tmpdir))
tdSql.execute("drop database db")
os.system("%staosdump -i %s" % (binPath, self.tmpdir))
tdSql.query("show databases")
tdSql.checkRows(1)
tdSql.execute("use db")
tdSql.query("show stables")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 'st')
tdSql.query("show tables")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 't1')
tdSql.query("select jtag->'location' from st")
tdSql.checkRows(1)
tdSql.checkData(0, 0, "\"beijing\"")
tdSql.query("select * from st where jtag contains 'location'")
tdSql.checkRows(1)
tdSql.checkData(0, 1, 1)
tdSql.checkData(0, 2, '{\"location\":\"beijing\"}')
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
bash 3-connectors/c#/test.sh
bash 3-connectors/go/test.sh
bash 3-connectors/java/test.sh
bash 3-connectors/nodejs/test.sh
bash 3-connectors/python/test.sh
bash 3-connectors/restful/test.sh
bash 3-connectors/rust/test.sh
python3 ./test.py -f 1-insert/batchInsert.py
\ No newline at end of file
python3 ./test.py -f 0-others/json_tag.py
\ No newline at end of file
python3 ./test.py -f 2-query/ts_hidden_column.py
python3 ./test.py -f 2-query/union-order.py
python3 ./test.py -f 2-query/session_two_stage.py
python3 ./test.py -f 5-taos-tools/taosdump/taosdumpTestTypeJson.py
\ No newline at end of file
python3 test.py -f 0-management/3-tag/json_tag.py
python3 test.py -f 1-insert/0-sql/batchInsert.py
python3 test.py -f 2-query/7-nest/ts_hidden_column.py
# -*- coding: utf-8 -*-
import yaml
import os
import time
from loguru import logger
current_time = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime(time.time()))
current_dir = os.path.dirname(os.path.realpath(__file__))
config_file = os.path.join(current_dir, '../config/perf_test.yaml')
f = open(config_file)
config = yaml.load(f, Loader=yaml.FullLoader)
log_file = os.path.join(current_dir, f'../log/performance_{current_time}.log')
logger.add(log_file)
logger.info(f'init env success, log will be export to {log_file}')
deploy_mode: no
taosd_autodeploy: False
install_package: /home/ubuntu/TDengine/release/TDengine-server-2.3.4.0-beta-Linux-x64.tar.gz
clean_env: True
hostname_prefix: vm
timeout: 10
taosd_dnode1:
ip: 192.168.1.85
port: 22
restful_port: 6041
telnet_port: 6051
username: root
password: ******
modify_cfg: False
cfg:
dataDir: /data/lib/taos
logDir: /data/log/taos
taosd_cluster: False
taosadapter_separate_deploy: True
taosd_dnode2:
ip: 192.168.1.83
port: 22
restful_port: 6041
telnet_port: 6046
username: root
password: ******
modify_cfg: False
cfg:
dataDir: /data/lib/taos
logDir: /data/log/taos
taosd_dnode3:
ip: 192.168.1.84
port: 22
restful_port: 6041
telnet_port: 6046
username: root
password: ******
modify_cfg: False
cfg:
dataDir: /data/lib/taos
logDir: /data/log/taos
taosd_dnode4:
ip: 192.168.1.86
port: 22
restful_port: 6041
telnet_port: 6046
username: root
password: ******
modify_cfg: False
cfg:
dataDir: /data/lib/taos
logDir: /data/log/taos
prometheus:
autodeploy: True
ip: 192.168.1.101
port: 22
username: root
password: ******
scrape_interval: 3s
evaluation_interval: 1s
scrape_timeout: 3s
prometheus_addr: http://39.105.163.10:9000/prometheus-2.31.1.linux-amd64.tar.gz
node_exporter_addr: https://github.com/prometheus/node_exporter/releases/download/v1.3.1/node_exporter-1.3.1.linux-amd64.tar.gz
process_exporter_addr: https://github.com/ncabatoff/process-exporter/releases/download/v0.7.10/process-exporter-0.7.10.linux-amd64.tar.gz
jmeter:
autodeploy: False
aggregate_report: True
clean_aggregate_report: True
ip: 127.0.0.1
port: 22
username: root
password: ******
jmeter_addr: https://dlcdn.apache.org//jmeter/binaries/apache-jmeter-5.4.1.tgz
testcases:
testcase1:
threads: 24
protocol: telnet-restful
taosadapter_count: 1
stb_count: 1
tb_count: 1
row_count: 3000000
sleep_time: 60s
tag_count: 10
col_count: 1
testcase2:
threads: 32
protocol: telnet-restful
taosadapter_count: 1
stb_count: 1
tb_count: 1
row_count: 3000000
sleep_time: 60s
tag_count: 10
col_count: 1
testcase3:
threads: 64
protocol: telnet-restful
taosadapter_count: 1
stb_count: 1
tb_count: 1
row_count: 3000000
sleep_time: 60s
tag_count: 10
col_count: 1
testcase4:
threads: 100
protocol: telnet-restful
taosadapter_count: 1
stb_count: 1
tb_count: 1
row_count: 5000000
sleep_time: 60s
tag_count: 10
col_count: 1
testcase5:
threads: 100
protocol: telnet-restful
taosadapter_count: 2
stb_count: 1
tb_count: 1
row_count: 5000000
sleep_time: 60s
tag_count: 10
col_count: 1
testcase6:
threads: 100
protocol: telnet-restful
taosadapter_count: 3
stb_count: 1
tb_count: 1
row_count: 5000000
sleep_time: 60s
tag_count: 10
col_count: 1
testcase7:
threads: 100
protocol: telnet-restful
taosadapter_count: 1
stb_count: 1000000
tb_count: 1
row_count: 1
sleep_time: 60s
tag_count: 10
col_count: 1
testcase8:
threads: 100
protocol: telnet-restful
taosadapter_count: 1
stb_count: 1
tb_count: 5000000
row_count: 1
sleep_time: 60s
tag_count: 10
col_count: 1
testcase9:
threads: 100
protocol: telnet-restful
taosadapter_count: 1
stb_count: 1
tb_count: 100000
row_count: 1000
sleep_time: 60s
tag_count: 10
col_count: 1
testcase10:
threads: 100
protocol: telnet-restful
taosadapter_count: 1
stb_count: 1
tb_count: 10
row_count: 10000000
sleep_time: 60s
tag_count: 10
col_count: 1
testcase11:
threads: 24
protocol: json
taosadapter_count: 1
stb_count: 1
tb_count: 1
row_count: 3000000
sleep_time: 60s
tag_count: 10
col_count: 1
testcase12:
threads: 32
protocol: json
taosadapter_count: 1
stb_count: 1
tb_count: 1
row_count: 3000000
sleep_time: 60s
tag_count: 10
col_count: 1
testcase13:
threads: 64
protocol: json
taosadapter_count: 1
stb_count: 1
tb_count: 1
row_count: 3000000
sleep_time: 60s
tag_count: 10
col_count: 1
testcase14:
threads: 100
protocol: json
taosadapter_count: 1
stb_count: 1
tb_count: 1
row_count: 5000000
sleep_time: 60s
tag_count: 10
col_count: 1
testcase15:
threads: 100
protocol: json
taosadapter_count: 2
stb_count: 1
tb_count: 1
row_count: 5000000
sleep_time: 60s
tag_count: 10
col_count: 1
testcase16:
threads: 100
protocol: json
taosadapter_count: 3
stb_count: 1
tb_count: 1
row_count: 5000000
sleep_time: 60s
tag_count: 10
col_count: 1
testcase17:
threads: 100
protocol: json
taosadapter_count: 1
stb_count: 1000000
tb_count: 1
row_count: 1
sleep_time: 60s
tag_count: 10
col_count: 1
testcase18:
threads: 100
protocol: json
taosadapter_count: 1
stb_count: 1
tb_count: 5000000
row_count: 1
sleep_time: 60s
tag_count: 10
col_count: 1
testcase19:
threads: 100
protocol: json
taosadapter_count: 1
stb_count: 1
tb_count: 100000
row_count: 1000
sleep_time: 60s
tag_count: 10
col_count: 1
testcase20:
threads: 100
protocol: json
taosadapter_count: 1
stb_count: 1
tb_count: 10
row_count: 10000000
sleep_time: 60s
tag_count: 10
col_count: 1
<?xml version="1.0" encoding="UTF-8"?>
<jmeterTestPlan version="1.2" properties="5.0" jmeter="5.4.1">
<hashTree>
<TestPlan guiclass="TestPlanGui" testclass="TestPlan" testname="Test Plan" enabled="true">
<stringProp name="TestPlan.comments"></stringProp>
<boolProp name="TestPlan.functional_mode">false</boolProp>
<boolProp name="TestPlan.tearDown_on_shutdown">true</boolProp>
<boolProp name="TestPlan.serialize_threadgroups">true</boolProp>
<elementProp name="TestPlan.user_defined_variables" elementType="Arguments" guiclass="ArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
<collectionProp name="Arguments.arguments"/>
</elementProp>
<stringProp name="TestPlan.user_define_classpath"></stringProp>
</TestPlan>
<hashTree>
<ThreadGroup guiclass="ThreadGroupGui" testclass="ThreadGroup" testname="drop db" enabled="drop_db_status">
<stringProp name="ThreadGroup.on_sample_error">continue</stringProp>
<elementProp name="ThreadGroup.main_controller" elementType="LoopController" guiclass="LoopControlPanel" testclass="LoopController" testname="Loop Controller" enabled="true">
<boolProp name="LoopController.continue_forever">false</boolProp>
<stringProp name="LoopController.loops">1</stringProp>
</elementProp>
<stringProp name="ThreadGroup.num_threads">1</stringProp>
<stringProp name="ThreadGroup.ramp_time">1</stringProp>
<boolProp name="ThreadGroup.scheduler">false</boolProp>
<stringProp name="ThreadGroup.duration"></stringProp>
<stringProp name="ThreadGroup.delay"></stringProp>
<boolProp name="ThreadGroup.same_user_on_next_iteration">true</boolProp>
</ThreadGroup>
<hashTree>
<HTTPSamplerProxy guiclass="HttpTestSampleGui" testclass="HTTPSamplerProxy" testname="drop db" enabled="true">
<boolProp name="HTTPSampler.postBodyRaw">true</boolProp>
<elementProp name="HTTPsampler.Arguments" elementType="Arguments">
<collectionProp name="Arguments.arguments">
<elementProp name="" elementType="HTTPArgument">
<boolProp name="HTTPArgument.always_encode">false</boolProp>
<stringProp name="Argument.value">drop database if exists db_name</stringProp>
<stringProp name="Argument.metadata">=</stringProp>
</elementProp>
</collectionProp>
</elementProp>
<stringProp name="HTTPSampler.domain"></stringProp>
<stringProp name="HTTPSampler.port"></stringProp>
<stringProp name="HTTPSampler.protocol"></stringProp>
<stringProp name="HTTPSampler.contentEncoding"></stringProp>
<stringProp name="HTTPSampler.path">http://restful_ip:restful_port/rest/sql</stringProp>
<stringProp name="HTTPSampler.method">POST</stringProp>
<boolProp name="HTTPSampler.follow_redirects">true</boolProp>
<boolProp name="HTTPSampler.auto_redirects">false</boolProp>
<boolProp name="HTTPSampler.use_keepalive">true</boolProp>
<boolProp name="HTTPSampler.DO_MULTIPART_POST">false</boolProp>
<stringProp name="HTTPSampler.embedded_url_re"></stringProp>
<stringProp name="HTTPSampler.connect_timeout"></stringProp>
<stringProp name="HTTPSampler.response_timeout"></stringProp>
</HTTPSamplerProxy>
<hashTree/>
</hashTree>
<ThreadGroup guiclass="ThreadGroupGui" testclass="ThreadGroup" testname="create db" enabled="create_db_status">
<stringProp name="ThreadGroup.on_sample_error">continue</stringProp>
<elementProp name="ThreadGroup.main_controller" elementType="LoopController" guiclass="LoopControlPanel" testclass="LoopController" testname="Loop Controller" enabled="true">
<boolProp name="LoopController.continue_forever">false</boolProp>
<stringProp name="LoopController.loops">1</stringProp>
</elementProp>
<stringProp name="ThreadGroup.num_threads">1</stringProp>
<stringProp name="ThreadGroup.ramp_time">1</stringProp>
<boolProp name="ThreadGroup.scheduler">false</boolProp>
<stringProp name="ThreadGroup.duration"></stringProp>
<stringProp name="ThreadGroup.delay"></stringProp>
<boolProp name="ThreadGroup.same_user_on_next_iteration">true</boolProp>
</ThreadGroup>
<hashTree>
<HTTPSamplerProxy guiclass="HttpTestSampleGui" testclass="HTTPSamplerProxy" testname="create db" enabled="true">
<boolProp name="HTTPSampler.postBodyRaw">true</boolProp>
<elementProp name="HTTPsampler.Arguments" elementType="Arguments">
<collectionProp name="Arguments.arguments">
<elementProp name="" elementType="HTTPArgument">
<boolProp name="HTTPArgument.always_encode">false</boolProp>
<stringProp name="Argument.value">create database if not exists db_name precision &apos;ms&apos;</stringProp>
<stringProp name="Argument.metadata">=</stringProp>
</elementProp>
</collectionProp>
</elementProp>
<stringProp name="HTTPSampler.domain"></stringProp>
<stringProp name="HTTPSampler.port"></stringProp>
<stringProp name="HTTPSampler.protocol"></stringProp>
<stringProp name="HTTPSampler.contentEncoding"></stringProp>
<stringProp name="HTTPSampler.path">http://restful_ip:restful_port/rest/sql</stringProp>
<stringProp name="HTTPSampler.method">POST</stringProp>
<boolProp name="HTTPSampler.follow_redirects">true</boolProp>
<boolProp name="HTTPSampler.auto_redirects">false</boolProp>
<boolProp name="HTTPSampler.use_keepalive">true</boolProp>
<boolProp name="HTTPSampler.DO_MULTIPART_POST">false</boolProp>
<stringProp name="HTTPSampler.embedded_url_re"></stringProp>
<stringProp name="HTTPSampler.connect_timeout"></stringProp>
<stringProp name="HTTPSampler.response_timeout"></stringProp>
</HTTPSamplerProxy>
<hashTree/>
</hashTree>
<ThreadGroup guiclass="ThreadGroupGui" testclass="ThreadGroup" testname="taosadapter performance test" enabled="true">
<stringProp name="ThreadGroup.on_sample_error">continue</stringProp>
<elementProp name="ThreadGroup.main_controller" elementType="LoopController" guiclass="LoopControlPanel" testclass="LoopController" testname="Loop Controller" enabled="true">
<boolProp name="LoopController.continue_forever">false</boolProp>
<stringProp name="LoopController.loops">loop_count</stringProp>
</elementProp>
<stringProp name="ThreadGroup.num_threads">perf_threads</stringProp>
<stringProp name="ThreadGroup.ramp_time"></stringProp>
<boolProp name="ThreadGroup.scheduler">false</boolProp>
<stringProp name="ThreadGroup.duration"></stringProp>
<stringProp name="ThreadGroup.delay"></stringProp>
<boolProp name="ThreadGroup.same_user_on_next_iteration">true</boolProp>
</ThreadGroup>
<hashTree>
<HTTPSamplerProxy guiclass="HttpTestSampleGui" testclass="HTTPSamplerProxy" testname="telnet-restful" enabled="telnet_restful_status">
<boolProp name="HTTPSampler.postBodyRaw">true</boolProp>
<elementProp name="HTTPsampler.Arguments" elementType="Arguments">
<collectionProp name="Arguments.arguments">
<elementProp name="" elementType="HTTPArgument">
<boolProp name="HTTPArgument.always_encode">false</boolProp>
<stringProp name="Argument.value">input_line</stringProp>
<stringProp name="Argument.metadata">=</stringProp>
</elementProp>
</collectionProp>
</elementProp>
<stringProp name="HTTPSampler.domain"></stringProp>
<stringProp name="HTTPSampler.port"></stringProp>
<stringProp name="HTTPSampler.protocol"></stringProp>
<stringProp name="HTTPSampler.contentEncoding"></stringProp>
<stringProp name="HTTPSampler.path">http://restful_ip:restful_port/opentsdb/v1/put/line_protocol/db_name</stringProp>
<stringProp name="HTTPSampler.method">POST</stringProp>
<boolProp name="HTTPSampler.follow_redirects">true</boolProp>
<boolProp name="HTTPSampler.auto_redirects">false</boolProp>
<boolProp name="HTTPSampler.use_keepalive">true</boolProp>
<boolProp name="HTTPSampler.DO_MULTIPART_POST">false</boolProp>
<stringProp name="HTTPSampler.embedded_url_re"></stringProp>
<stringProp name="HTTPSampler.connect_timeout"></stringProp>
<stringProp name="HTTPSampler.response_timeout"></stringProp>
</HTTPSamplerProxy>
<hashTree/>
<TCPSampler guiclass="TCPSamplerGui" testclass="TCPSampler" testname="telnet-tcp" enabled="telnet_tcp_status">
<stringProp name="TCPSampler.classname">org.apache.jmeter.protocol.tcp.sampler.TCPClientImpl</stringProp>
<stringProp name="TCPSampler.server">telnet_ip</stringProp>
<boolProp name="TCPSampler.reUseConnection">true</boolProp>
<stringProp name="TCPSampler.port">telnet_port</stringProp>
<boolProp name="TCPSampler.nodelay">false</boolProp>
<stringProp name="TCPSampler.timeout">1</stringProp>
<stringProp name="TCPSampler.ctimeout">500</stringProp>
<stringProp name="TCPSampler.request">input_line</stringProp>
<boolProp name="TCPSampler.closeConnection">false</boolProp>
<stringProp name="TCPSampler.EolByte">10</stringProp>
<stringProp name="ConfigTestElement.username"></stringProp>
<stringProp name="ConfigTestElement.password"></stringProp>
</TCPSampler>
<hashTree/>
</hashTree>
<ResultCollector guiclass="StatVisualizer" testclass="ResultCollector" testname="Aggregate Report" enabled="true">
<boolProp name="ResultCollector.error_logging">false</boolProp>
<objProp>
<name>saveConfig</name>
<value class="SampleSaveConfiguration">
<time>true</time>
<latency>true</latency>
<timestamp>true</timestamp>
<success>true</success>
<label>true</label>
<code>true</code>
<message>true</message>
<threadName>true</threadName>
<dataType>true</dataType>
<encoding>false</encoding>
<assertions>true</assertions>
<subresults>true</subresults>
<responseData>false</responseData>
<samplerData>false</samplerData>
<xml>false</xml>
<fieldNames>true</fieldNames>
<responseHeaders>false</responseHeaders>
<requestHeaders>false</requestHeaders>
<responseDataOnError>false</responseDataOnError>
<saveAssertionResultsFailureMessage>true</saveAssertionResultsFailureMessage>
<assertionsResultsToSave>0</assertionsResultsToSave>
<bytes>true</bytes>
<sentBytes>true</sentBytes>
<url>true</url>
<threadCounts>true</threadCounts>
<idleTime>true</idleTime>
<connectTime>true</connectTime>
</value>
</objProp>
<stringProp name="filename"></stringProp>
</ResultCollector>
<hashTree/>
<HeaderManager guiclass="HeaderPanel" testclass="HeaderManager" testname="HTTP Header Manager" enabled="true">
<collectionProp name="HeaderManager.headers">
<elementProp name="" elementType="Header">
<stringProp name="Header.name">Authorization</stringProp>
<stringProp name="Header.value">Basic cm9vdDp0YW9zZGF0YQ==</stringProp>
</elementProp>
</collectionProp>
</HeaderManager>
<hashTree/>
<ResultCollector guiclass="ViewResultsFullVisualizer" testclass="ResultCollector" testname="View Results Tree" enabled="true">
<boolProp name="ResultCollector.error_logging">false</boolProp>
<objProp>
<name>saveConfig</name>
<value class="SampleSaveConfiguration">
<time>true</time>
<latency>true</latency>
<timestamp>true</timestamp>
<success>true</success>
<label>true</label>
<code>true</code>
<message>true</message>
<threadName>true</threadName>
<dataType>true</dataType>
<encoding>false</encoding>
<assertions>true</assertions>
<subresults>true</subresults>
<responseData>false</responseData>
<samplerData>false</samplerData>
<xml>false</xml>
<fieldNames>true</fieldNames>
<responseHeaders>false</responseHeaders>
<requestHeaders>false</requestHeaders>
<responseDataOnError>false</responseDataOnError>
<saveAssertionResultsFailureMessage>true</saveAssertionResultsFailureMessage>
<assertionsResultsToSave>0</assertionsResultsToSave>
<bytes>true</bytes>
<sentBytes>true</sentBytes>
<url>true</url>
<threadCounts>true</threadCounts>
<idleTime>true</idleTime>
<connectTime>true</connectTime>
</value>
</objProp>
<stringProp name="filename"></stringProp>
</ResultCollector>
<hashTree/>
<CSVDataSet guiclass="TestBeanGUI" testclass="CSVDataSet" testname="CSV Data Set Config" enabled="true">
<stringProp name="delimiter">,</stringProp>
<stringProp name="fileEncoding">UTF-8</stringProp>
<stringProp name="filename">import_file</stringProp>
<boolProp name="ignoreFirstLine">false</boolProp>
<boolProp name="quotedData">false</boolProp>
<boolProp name="recycle">true</boolProp>
<stringProp name="shareMode">shareMode.all</stringProp>
<boolProp name="stopThread">false</boolProp>
<stringProp name="variableNames">row_csv_count,tb_csv_count,stb_csv_count</stringProp>
</CSVDataSet>
<hashTree/>
</hashTree>
</hashTree>
</jmeterTestPlan>
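<!-- Illustrative note: this file is a template. Tokens such as restful_ip, restful_port,
     telnet_ip, telnet_port, db_name, input_line, import_file, perf_threads, loop_count and
     the *_status switches (drop_db_status, create_db_status, telnet_restful_status,
     telnet_tcp_status) are plain-text placeholders substituted by the Python driver
     (Common.genJmxFile / RunPerformance.runJmeter) before JMeter is launched. -->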
from config.env_init import *
from src.common.common import Common
from src.common.dnodes import Dnodes
from src.common.monitor import Monitor
from src.util.jmeter import Jmeter
class RunPerformance:
def __init__(self):
self.COM = Common()
self.current_dir = os.path.dirname(os.path.realpath(__file__))
        self.log_dir = os.path.join(self.current_dir, 'log')
if config["jmeter"]["clean_aggregate_report"]:
self.COM.exec_local_cmd(f'sudo rm -rf {self.log_dir}/testcase*')
def runJmeter(self):
for key, value in config['testcases'].items():
jmx_file_list = list()
logger.info(f'executing {key}')
for jmx_file in self.COM.genJmxFile(key)[:value["taosadapter_count"]]:
jmx_filename = jmx_file.split('/')[-1]
import_file_name = jmx_filename.replace('jmx', 'txt')
import_file = os.path.join(self.current_dir, f'./config/{import_file_name}')
loop_count = self.COM.getLoopCount(value["stb_count"], value["tb_count"], value["row_count"], value["threads"])
self.COM.genMixStbTbRows(import_file, value["stb_count"], value["tb_count"], value["row_count"])
input_line = self.COM.genProtocolLine(value["protocol"], value["tag_count"])
with open(jmx_file, 'r', encoding='utf-8') as f:
file_data = ""
for line in f:
if value['protocol'] == 'telnet-tcp':
if "telnet_tcp_status" in line:
line = line.replace("telnet_tcp_status", "true")
if value['protocol'] == 'telnet-restful' or value['protocol'] == 'json':
if "drop_db_status" in line:
line = line.replace("drop_db_status", "true")
if "create_db_status" in line:
line = line.replace("create_db_status", "true")
if "telnet_restful_status" in line:
line = line.replace("telnet_restful_status", "true")
if "line_protocol" in line:
if value['protocol'] == 'telnet-restful':
line = line.replace("line_protocol", 'telnet')
elif value['protocol'] == 'json':
line = line.replace("line_protocol", 'json')
else:
pass
if "db_name" in line:
db_name = jmx_filename.split('.')[0]
line = line.replace("db_name", db_name)
if "import_file" in line:
line = line.replace("import_file", import_file)
if "input_line" in line:
line = line.replace("input_line", input_line)
if "perf_threads" in line:
line = line.replace("perf_threads", str(value['threads']))
if "loop_count" in line:
line = line.replace("loop_count", str(loop_count))
file_data += line
with open(jmx_file, "w", encoding="utf-8") as f:
f.write(file_data)
jmx_file_list.append(jmx_file)
jmeter_cmd_list = self.COM.genJmeterCmd(jmx_file_list)
self.COM.multiThreadRun(self.COM.genJmeterThreads(jmeter_cmd_list))
            # sleep_time is configured like "60s"; keep only the digits before sleeping
            time.sleep(int(''.join(filter(str.isdigit, str(value["sleep_time"])))))
if __name__ == '__main__':
Performance = RunPerformance()
DNODES = Dnodes()
MONITOR = Monitor()
JMETER = Jmeter()
if config['deploy_mode'] == "auto":
if config['taosd_autodeploy']:
DNODES.deployNodes()
if config["prometheus"]["autodeploy"]:
MONITOR.deployAllNodeExporters()
MONITOR.deployAllProcessExporters()
MONITOR.deployPrometheus()
MONITOR.deployGrafana()
if config["jmeter"]["autodeploy"]:
JMETER.deployJmeter()
Performance.runJmeter()
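# Usage sketch (the script and config file names are assumptions, not confirmed here):
#   python3 run_performance.py          # with config/perf_test.yaml filled in
# With deploy_mode set to "auto" this first (re)deploys taosd/taosAdapter, the
# node/process exporters, Prometheus, Grafana and JMeter, then runs every entry
# under config['testcases'].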
import sys
sys.path.append("../../")
from config.env_init import *
import shutil
import threading
import time
import json
class Common:
def __init__(self):
self.ip_list = list()
self.current_dir = os.path.dirname(os.path.realpath(__file__))
self.base_jmx_file = os.path.join(self.current_dir, '../../config/taosadapter_performance_test.jmx')
self.log_dir = os.path.join(self.current_dir, '../../log')
def exec_local_cmd(self,shell_cmd):
logger.info(f'executing cmd: {shell_cmd}')
result = os.popen(shell_cmd).read().strip()
logger.info(result)
return result
    def genTelnetMulTagStr(self, count):
        # tags "t1=1 t2=2 ... t{count-1}={count-1}", space separated
        return ' '.join(f't{i}={i}' for i in range(1, count))
def genJsonMulTagDict(self, count):
tag_dict = dict()
for i in range(1, count):
tag_dict[f"t{i}"] = f"{i}"
return tag_dict
def genProtocolLine(self, protocol, tag_count, col_count=None):
if protocol == "telnet-restful":
base_str = 'stb_${stb_csv_count} ${row_csv_count} 32.261068286779754 t0=${tb_csv_count} '
tag_str = self.genTelnetMulTagStr(tag_count)
telnet_line = base_str + tag_str
return telnet_line
elif protocol == "telnet-tcp":
base_str = 'tstb_${stb_csv_count} ${row_csv_count} 32.261068286779754 t0=${tb_csv_count} '
tag_str = self.genTelnetMulTagStr(tag_count)
            # escape the backslashes so JMeter's __unescape receives the literal \r\n
            telnet_line = base_str + tag_str + '${__unescape(\\r\\n)}'
return telnet_line
elif protocol == "json":
base_tag_dict = {"t0":"${tb_csv_count}"}
dict_merged = base_tag_dict.copy()
dict_merged.update(self.genJsonMulTagDict(tag_count))
json_line = '{"metric": "stb_${stb_csv_count}", "timestamp":${row_csv_count}, "value":32.261068286779754, ' + f'"tags": {dict_merged}' + '}'
return json_line.replace('\'','"')
elif protocol == "influxdb":
# TODO
pass
else:
pass
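    # Example (illustrative) outputs of genProtocolLine for tag_count=3:
    #   telnet-restful: stb_${stb_csv_count} ${row_csv_count} 32.261068286779754 t0=${tb_csv_count} t1=1 t2=2
    #   json: {"metric": "stb_${stb_csv_count}", "timestamp":${row_csv_count}, "value":32.261068286779754, "tags": {"t0": "${tb_csv_count}", "t1": "1", "t2": "2"}}
    # The ${...} tokens are JMeter variables fed from the CSV Data Set at runtime.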
def genMixStbTbRows(self, filename, stb_count, tb_count, row_count):
if stb_count == 0:
stb_count = 1
if tb_count == 0:
tb_count = 1
if row_count == 0:
row_count = 1
logger.info(f'generating import data file: {filename}')
ts_start = 1614530008000
with open(filename, "w", encoding="utf-8") as f_w:
for k in range(stb_count):
for i in range(tb_count):
for j in range(row_count):
input_line = str(ts_start) + "," + str(i) + "," + str(k) + '\n'
ts_start += 1
f_w.write(input_line)
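    # Example (illustrative): genMixStbTbRows(path, 2, 2, 1) writes one
    # "timestamp,tb_index,stb_index" row per (stb, tb, row) combination:
    #   1614530008000,0,0
    #   1614530008001,1,0
    #   1614530008002,0,1
    #   1614530008003,1,1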
def genJmxFile(self, testcase):
des_jmx_file_list = list()
base_jmx_file = os.path.join(self.current_dir, '../../config/taosadapter_performance_test.jmx')
count_flag = 0
if config["taosadapter_separate_deploy"]:
for key in config:
if "taosd_dnode" in str(key) and "taosd_dnode1" not in str(key):
if count_flag < int(config['testcases'][testcase]['taosadapter_count']):
count_flag += 1
else:
break
des_jmx_file = os.path.join(self.current_dir, f'../../config/{testcase}_{key}.jmx')
shutil.copyfile(base_jmx_file, des_jmx_file)
with open(des_jmx_file, 'r', encoding='utf-8') as f:
file_data = ""
for line in f:
if "restful_ip" in line:
line = line.replace("restful_ip", config[key]['ip'])
if "restful_port" in line:
line = line.replace("restful_port", str(config[key]['restful_port']))
if "telnet_ip" in line:
line = line.replace("telnet_ip", config[key]['ip'])
if "telnet_port" in line:
line = line.replace("telnet_port", str(config[key]['telnet_port']))
# if "db_name" in line:
# line = line.replace("db_name", key)
file_data += line
with open(des_jmx_file, "w", encoding="utf-8") as f:
f.write(file_data)
des_jmx_file_list.append(des_jmx_file)
else:
des_jmx_file = os.path.join(self.current_dir, f'../../config/{testcase}_taosd_dnode1.jmx')
shutil.copyfile(base_jmx_file, des_jmx_file)
with open(des_jmx_file, 'r', encoding='utf-8') as f:
file_data = ""
for line in f:
if "restful_ip" in line:
line = line.replace("restful_ip", config['taosd_dnode1']['ip'])
if "restful_port" in line:
line = line.replace("restful_port", str(config['taosd_dnode1']['restful_port']))
if "telnet_ip" in line:
line = line.replace("telnet_ip", config['taosd_dnode1']['ip'])
if "telnet_port" in line:
line = line.replace("telnet_port", str(config['taosd_dnode1']['telnet_port']))
# if "db_name" in line:
# line = line.replace("db_name", "taosd_dnode1")
file_data += line
with open(des_jmx_file, "w", encoding="utf-8") as f:
f.write(file_data)
des_jmx_file_list.append(des_jmx_file)
return des_jmx_file_list
def getLoopCount(self, stb_count, tb_count, row_count, threads):
if (stb_count * tb_count * row_count) % threads == 0:
loop_count = int((stb_count * tb_count * row_count) / threads)
else:
loop_count = int((stb_count * tb_count * row_count) / threads) + 1
return loop_count
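    # e.g. getLoopCount(1, 10, 10000000, 100) -> 1000000 loops per thread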
def recreateReportDir(self, path):
'''
recreate jmeter report path
'''
if os.path.exists(path):
self.exec_local_cmd(f'rm -rf {path}/*')
else:
os.makedirs(path)
def genJmeterCmd(self, jmx_file_list):
jmeter_cmd_list = list()
for jmx_file in jmx_file_list:
jmeter_cmd = f'jmeter -n -t {jmx_file}'
if config['jmeter']['aggregate_report']:
current_time = time.strftime("%Y_%m_%d_%H_%M_%S", time.localtime(time.time()))
jmx_filename = jmx_file.split('/')[-1].replace('.jmx', '')
jmx_filelog = f'{jmx_filename}_{current_time}'
jmeter_report_dir = f'{self.log_dir}/{jmx_filelog}'
self.recreateReportDir(jmeter_report_dir)
jmeter_cmd += f' -l {jmeter_report_dir}/{jmx_filelog}.log -e -o {jmeter_report_dir}'
jmeter_cmd_list.append(jmeter_cmd)
return jmeter_cmd_list
def genJmeterThreads(self, jmeter_cmd_list):
tlist = list()
for jmeter_cmd in jmeter_cmd_list:
t = threading.Thread(target=self.exec_local_cmd, args=(jmeter_cmd,))
tlist.append(t)
return tlist
def multiThreadRun(self, tlist):
for t in tlist:
t.start()
for t in tlist:
t.join()
if __name__ == '__main__':
com = Common()
import sys
import json
sys.path.append("../../")
from config.env_init import *
from src.util.RemoteModule import RemoteModule
class Dnode:
def __init__(self, index, dnode_ip, dnode_port, dnode_username, dnode_password):
self.install_package = config["install_package"]
self.hostname_prefix = config["hostname_prefix"]
self.ip_suffix = dnode_ip.split('.')[-1]
self.dnode_name = self.hostname_prefix + self.ip_suffix
self.index = index
self.dnode_dict = config[f'taosd_dnode{self.index}']
self.dnode_ip = dnode_ip
self.dnode_port = dnode_port
self.dnode_username = dnode_username
self.dnode_password = dnode_password
self.dnode_conn = RemoteModule(self.dnode_ip, self.dnode_port, self.dnode_username, self.dnode_password)
if self.dnode_username == "root":
self.home_dir = "/root"
else:
self.home_dir = f"/home/{self.dnode_username}"
def installPackage(self):
if bool(int(self.dnode_conn.exec_cmd(f'cat /etc/os-release | grep ubuntu >> /dev/null && echo 1 || echo 0'))):
package_list = ["wget", "screen"]
for package in package_list:
if not bool(int(self.dnode_conn.exec_cmd(f'sudo dpkg -s {package} >> /dev/null && echo 1 || echo 0'))):
                    self.dnode_conn.exec_cmd(f'sudo apt-get update && sudo apt-get install -y {package}')
elif bool(int(self.dnode_conn.exec_cmd(f'cat /etc/os-release | grep centos >> /dev/null && echo 1 || echo 0'))):
package_list = ["wget", "screen"]
for package in package_list:
if not bool(int(self.dnode_conn.exec_cmd(f'sudo rpm -qa | grep {package} >> /dev/null && echo 1 || echo 0'))):
                    self.dnode_conn.exec_cmd(f'sudo yum update -y && sudo yum install -y {package}')
else:
pass
def startTaosd(self):
logger.info(f'{self.dnode_ip}: starting taosd')
self.dnode_conn.exec_cmd("sudo systemctl start taosd")
def stopTaosd(self):
logger.info(f'{self.dnode_ip}: stopping taosd')
self.dnode_conn.exec_cmd("sudo systemctl stop taosd")
def killTaosd(self):
logger.info(f'{self.dnode_ip}: killing taosd')
self.dnode_conn.exec_cmd("ps -ef | grep -w taosd | grep -v grep | awk \'{print $2}\' | sudo xargs kill -9")
def restartTaosd(self):
logger.info(f'{self.dnode_ip}: restarting taosd')
self.dnode_conn.exec_cmd("sudo systemctl restart taosd")
def startTaosadapter(self):
logger.info(f'{self.dnode_ip}: starting taosadapter')
self.dnode_conn.exec_cmd("sudo systemctl start taosadapter")
def stopTaosadapter(self):
logger.info(f'{self.dnode_ip}: stopping taosadapter')
self.dnode_conn.exec_cmd("sudo systemctl stop taosadapter")
def killTaosadapter(self):
logger.info(f'{self.dnode_ip}: killing taosadapter')
self.dnode_conn.exec_cmd("ps -ef | grep -w taosadapter | grep -v grep | awk \'{print $2}\' | sudo xargs kill -9")
def restartTaosadapter(self):
logger.info(f'{self.dnode_ip}: restarting taosadapter')
self.dnode_conn.exec_cmd("sudo systemctl restart taosadapter")
def rmTaosd(self):
logger.info(f'{self.dnode_ip}: removing taosd')
self.dnode_conn.exec_cmd("rmtaos")
def rmTaosdLog(self):
logger.info(f'{self.dnode_ip}: removing taosd log')
if self.dnode_dict["modify_cfg"]:
self.dnode_conn.exec_cmd(f'sudo rm -rf {self.dnode_dict["cfg"]["logDir"]}/*')
else:
self.dnode_conn.exec_cmd("sudo rm -rf /var/log/taos/*")
def rmTaosdData(self):
logger.info(f'{self.dnode_ip}: removing taosd data')
if self.dnode_dict["modify_cfg"]:
self.dnode_conn.exec_cmd(f'sudo rm -rf {self.dnode_dict["cfg"]["dataDir"]}/*')
else:
self.dnode_conn.exec_cmd("sudo rm -rf /var/lib/taos/*")
def rmTaosCfg(self):
logger.info(f'{self.dnode_ip}: removing taos.cfg')
self.dnode_conn.exec_cmd("sudo rm -rf /etc/taos/taos.cfg")
def modifyTaosCfg(self, firstEp=None):
hostname = self.configHostname()
if self.dnode_dict["modify_cfg"]:
logger.info(f'{self.dnode_ip}: modify /etc/taos/taos.cfg')
for key, value in self.dnode_dict['cfg'].items():
self.createRemoteDir(value)
self.dnode_conn.exec_cmd(f'echo {key} {value} >> /etc/taos/taos.cfg')
if firstEp is not None:
self.dnode_conn.exec_cmd(f'echo "firstEp {firstEp}" >> /etc/taos/taos.cfg')
self.dnode_conn.exec_cmd(f'echo "fqdn {hostname}" >> /etc/taos/taos.cfg')
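    # Example (illustrative): with cfg {"logDir": "/data/taos/log"} and
    # firstEp "perf-100:6030", /etc/taos/taos.cfg gains:
    #   logDir /data/taos/log
    #   firstEp perf-100:6030
    #   fqdn <this dnode's hostname>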
def createRemoteDir(self, dir):
'''
if exist: echo 1
else: echo 0
'''
res = bool(int(self.dnode_conn.exec_cmd(f'[ -e {dir} ] && echo 1 || echo 0')))
if not res:
self.dnode_conn.exec_cmd(f'sudo mkdir -p {dir}')
def getHostname(self, ip=None):
if ip == self.dnode_ip:
return self.dnode_conn.exec_cmd('hostname').strip()
else:
return False
def configHostname(self):
logger.info(f'{self.dnode_ip}: config dnode hostname')
ori_hostname = self.dnode_conn.exec_cmd('hostname').strip()
if "localhost" in str(ori_hostname).lower():
self.dnode_conn.exec_cmd(f"sudo hostnamectl set-hostname {self.dnode_name}")
return self.dnode_name
return ori_hostname
def hostsIsExist(self, ip, hostname):
host_count = int(self.dnode_conn.exec_cmd(f'grep "^{ip}.*.{hostname}" /etc/hosts | wc -l'))
if host_count > 0:
logger.info(f'{self.dnode_ip}: check /etc/hosts: {ip} {hostname} existed')
return True
else:
logger.info(f'{self.dnode_ip}: check /etc/hosts: {ip} {hostname} not exist')
return False
def configHosts(self, ip, hostname):
if not self.hostsIsExist(ip, hostname):
logger.info(f'{self.dnode_ip}: config dnode /etc/hosts: {ip} {hostname}')
            # run the redirect under sudo as well; a bare `sudo echo ... >>` appends as the login user
            self.dnode_conn.exec_cmd(f'sudo sh -c \'echo "{ip} {hostname}" >> /etc/hosts\'')
def checkStatus(self, process):
process_count = self.dnode_conn.exec_cmd(f'ps -ef | grep -w {process} | grep -v grep | wc -l')
if int(process_count.strip()) > 0:
logger.info(f'check {self.dnode_ip} {process} existed')
return True
else:
logger.info(f'check {self.dnode_ip} {process} not exist')
return False
def taoscCreateDnodes(self):
firstEp = f'{self.configHostname()}:6030'
self.dnode_conn.exec_cmd(f'sudo taos -s "create dnode \'{firstEp}\'"')
ready_count = self.dnode_conn.exec_cmd(f'taos -s "show dnodes" | grep {firstEp} | grep ready | wc -l')
        ready_flag = 0
        while int(ready_count) != 1:
            if ready_flag >= config["timeout"]:
                logger.error(f'deploy cluster {firstEp} failed, please check manually')
                sys.exit(1)
            ready_flag += 1
            time.sleep(1)
            ready_count = self.dnode_conn.exec_cmd(f'taos -s "show dnodes" | grep {firstEp} | grep ready | wc -l')
        logger.success(f'deploy dnode {firstEp} success')
def downloadNodeExporter(self):
logger.info(f'{self.dnode_ip}: downloading node_exporter from {config["prometheus"]["node_exporter_addr"]}')
tar_file_name = config["prometheus"]["node_exporter_addr"].split("/")[-1]
if not bool(int(self.dnode_conn.exec_cmd(f'[ -e ~/{tar_file_name} ] && echo 1 || echo 0'))):
self.dnode_conn.exec_cmd(f'wget -P ~ {config["prometheus"]["node_exporter_addr"]}')
def configNodeExporterService(self):
        logger.info(f'{self.dnode_ip}: configuring /lib/systemd/system/node_exporter.service')
        if not bool(int(self.dnode_conn.exec_cmd('[ -e /lib/systemd/system/node_exporter.service ] && echo 1 || echo 0'))):
            # write the unit file through `sh -c` so both echo and the redirect run as root
            unit_file = ('[Unit]\\nDescription=node_exporter\\nAfter=network.target\\n\\n'
                         '[Service]\\nUser=prometheus\\nGroup=prometheus\\nExecStart=/usr/local/bin/node_exporter\\n\\n'
                         '[Install]\\nWantedBy=multi-user.target')
            self.dnode_conn.exec_cmd(f'sudo sh -c \'echo -e "{unit_file}" > /lib/systemd/system/node_exporter.service\'')
def killNodeExporter(self):
logger.info(f'{self.dnode_ip}: killing node_exporter')
self.dnode_conn.exec_cmd("ps -ef | grep -w node_exporter | grep -v grep | awk \'{print $2}\' | sudo xargs kill -9")
def deployNodeExporter(self):
logger.info(f'{self.dnode_ip}: deploying node_exporter')
self.killNodeExporter()
self.downloadNodeExporter()
tar_file_name = config["prometheus"]["node_exporter_addr"].split("/")[-1]
tar_file_dir = tar_file_name.replace(".tar.gz", "")
self.dnode_conn.exec_cmd(f'cd ~ && tar -xvf {tar_file_name} && cd {tar_file_dir} && cp -rf node_exporter /usr/local/bin')
self.configNodeExporterService()
self.dnode_conn.exec_cmd('sudo groupadd -r prometheus')
self.dnode_conn.exec_cmd('sudo useradd -r -g prometheus -s /sbin/nologin -M -c "prometheus Daemons" prometheus')
self.dnode_conn.exec_cmd('systemctl start node_exporter && systemctl enable node_exporter && systemctl status node_exporter')
def downloadProcessExporter(self):
tar_file_name = config["prometheus"]["process_exporter_addr"].split("/")[-1]
logger.info(f'{self.dnode_ip}: downloading process_exporter from {config["prometheus"]["process_exporter_addr"]}')
if not bool(int(self.dnode_conn.exec_cmd(f'[ -e ~/{tar_file_name} ] && echo 1 || echo 0'))):
self.dnode_conn.exec_cmd(f'wget -P ~ {config["prometheus"]["process_exporter_addr"]}')
def killProcessExporter(self):
logger.info(f'{self.dnode_ip}: killing process_exporter')
self.dnode_conn.exec_cmd("ps -ef | grep -w process_exporter | grep -v grep | awk \'{print $2}\' | sudo xargs kill -9")
def uploadProcessExporterYml(self, process_list):
logger.info(f'{self.dnode_ip}: generating process_exporter yml')
sub_list = list()
for process in process_list:
sub_list.append({'name':'{{.Comm}}', 'cmdline': [process]})
djson = {'process_names': sub_list}
dstr=json.dumps(djson)
dyml=yaml.load(dstr, Loader=yaml.FullLoader)
        with open('process_name.yml', 'w') as stream:
            yaml.safe_dump(dyml, stream, default_flow_style=False)
self.dnode_conn.upload_file(self.home_dir, 'process_name.yml')
def deployProcessExporter(self, process_list):
logger.info(f'{self.dnode_ip}: deploying process_exporter')
self.killProcessExporter()
self.downloadProcessExporter()
self.uploadProcessExporterYml(process_list)
tar_file_name = config["prometheus"]["process_exporter_addr"].split("/")[-1]
tar_file_dir = tar_file_name.replace(".tar.gz", "")
self.dnode_conn.exec_cmd(f'cd ~ && tar -xvf {tar_file_name} && mv -f ~/process_name.yml ~/{tar_file_dir}')
self.dnode_conn.exec_cmd(f'screen -d -m ~/{tar_file_dir}/process-exporter --config.path ~/{tar_file_dir}/process_name.yml')
def deployTaosd(self, firstEp=None, deploy_type="taosd"):
'''
deploy_type = taosd/taosadapter
'''
self.dnode_conn.upload_file(self.home_dir, self.install_package)
if config["clean_env"]:
self.rmTaosCfg()
self.rmTaosdLog()
self.rmTaosdData()
package_name = self.install_package.split("/")[-1]
package_dir = '-'.join(package_name.split("-", 3)[0:3])
self.stopTaosd()
self.killTaosd()
logger.info(f'{self.dnode_ip}: installing taosd')
logger.info(self.dnode_conn.exec_cmd(f'cd {self.home_dir} && tar -xvf {self.home_dir}/{package_name} && cd {package_dir} && yes|./install.sh'))
self.modifyTaosCfg(firstEp)
if deploy_type == "taosd":
self.startTaosd()
elif deploy_type == "taosadapter":
self.startTaosadapter()
if self.checkStatus(deploy_type):
logger.success(f'{self.dnode_ip}: {deploy_type} deploy success')
else:
            logger.error(f'{self.dnode_ip}: {deploy_type} deploy failed, please check manually')
sys.exit(1)
class Dnodes:
def __init__(self):
self.dnodes = list()
self.ip_list = list()
index = 1
for key in config:
if "taosd_dnode" in str(key):
self.dnodes.append(Dnode(index, config[key]["ip"], config[key]["port"], config[key]["username"], config[key]["password"]))
self.ip_list.append(config[key]["ip"])
index += 1
def installDnodesPackage(self):
for index in range(len(self.dnodes)):
self.dnodes[index].installPackage()
def rmDnodeTaosd(self, index):
self.dnodes[index - 1].rmTaosd()
def rmDnodeTaosdLog(self, index):
self.dnodes[index - 1].rmTaosdLog()
def rmDnodeTaosdData(self, index):
self.dnodes[index - 1].rmTaosdData()
def rmDnodeTaosCfg(self, index):
self.dnodes[index - 1].rmTaosCfg()
    def modifyDnodeTaosCfg(self, index, firstEp=None):
        self.dnodes[index - 1].modifyTaosCfg(firstEp)
def startDnodeTaosd(self, index):
self.dnodes[index - 1].startTaosd()
def stopDnodeTaosd(self, index):
self.dnodes[index - 1].stopTaosd()
def killDnodeTaosd(self, index):
self.dnodes[index - 1].killTaosd()
def restartDnodeTaosd(self, index):
self.dnodes[index - 1].restartTaosd()
def startAllTaosd(self):
for index in range(len(self.dnodes)):
self.dnodes[index].startTaosd()
def stopAllTaosd(self):
for index in range(len(self.dnodes)):
self.dnodes[index].stopTaosd()
    def killAllTaosd(self):
        for index in range(len(self.dnodes)):
            self.dnodes[index].killTaosd()
def restartAllTaosd(self):
for index in range(len(self.dnodes)):
self.dnodes[index].restartTaosd()
def startNodeTaosadapter(self, index):
self.dnodes[index - 1].startTaosadapter()
def stopNodeTaosadapter(self, index):
self.dnodes[index - 1].stopTaosadapter()
def killNodeTaosadapter(self, index):
self.dnodes[index - 1].killTaosadapter()
def restartNodeTaosadapter(self, index):
self.dnodes[index - 1].restartTaosadapter()
def startAllTaosadapters(self):
for index in range(len(self.dnodes)):
self.dnodes[index].startTaosadapter()
def stopAllTaosadapters(self):
for index in range(len(self.dnodes)):
self.dnodes[index].stopTaosadapter()
def killAllTaosadapters(self):
for index in range(len(self.dnodes)):
self.dnodes[index].killTaosadapter()
def restartAllTaosadapters(self):
for index in range(len(self.dnodes)):
self.dnodes[index].restartTaosadapter()
def configDnodesHostname(self):
for index in range(len(self.dnodes)):
self.dnodes[index].configHostname()
def configDnodesHosts(self):
ip_hostname_dict = dict()
for index in range(len(self.dnodes)):
for ip in self.ip_list:
hostname = self.dnodes[index].getHostname(ip)
if hostname is not False:
ip_hostname_dict[ip] = hostname
for index in range(len(self.dnodes)):
for ip, hostname in ip_hostname_dict.items():
self.dnodes[index].configHosts(ip, hostname)
def deployNodes(self):
self.configDnodesHostname()
self.configDnodesHosts()
firstEp = f'{self.dnodes[0].configHostname()}:6030'
if not config["taosadapter_separate_deploy"] and not config["taosd_cluster"]:
self.dnodes[0].deployTaosd()
elif config["taosadapter_separate_deploy"] and not config["taosd_cluster"]:
for index in range(len(self.dnodes)):
if index == 0:
self.dnodes[index].deployTaosd(firstEp, "taosd")
else:
self.dnodes[index].deployTaosd(firstEp, "taosadapter")
else:
for index in range(len(self.dnodes)):
self.dnodes[index].deployTaosd(firstEp)
for index in range(len(self.dnodes)):
if index != 0:
self.dnodes[index].taoscCreateDnodes()
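# Topology summary (as implemented in deployNodes): with neither taosd_cluster nor
# taosadapter_separate_deploy set, only dnode1 is installed; with separate adapters,
# dnode1 runs taosd and the remaining dnodes run taosadapter only; with taosd_cluster,
# every dnode gets a full install and dnodes 2..n are registered via taoscCreateDnodes().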
if __name__ == '__main__':
deploy = Dnodes()
deploy.deployNodes()
import sys
import json
sys.path.append("../../")
from config.env_init import *
from src.util.RemoteModule import RemoteModule
from src.common.dnodes import Dnodes, Dnode
class Monitor:
def __init__(self):
self.monitor_ip = config["prometheus"]["ip"]
self.monitor_port = config["prometheus"]["port"]
self.monitor_username = config["prometheus"]["username"]
self.monitor_password = config["prometheus"]["password"]
self.monitor_conn = RemoteModule(self.monitor_ip, self.monitor_port, self.monitor_username, self.monitor_password)
self.dnodes = list()
index = 1
for key in config:
if "taosd_dnode" in str(key):
self.dnodes.append(Dnode(index, config[key]["ip"], config[key]["port"], config[key]["username"], config[key]["password"]))
index += 1
if self.monitor_username == "root":
self.home_dir = "/root"
else:
self.home_dir = f"/home/{self.monitor_username}"
def installDnodesPackage(self):
for index in range(len(self.dnodes)):
self.dnodes[index].installPackage()
def deployAllNodeExporters(self):
for index in range(len(self.dnodes)):
self.dnodes[index].deployNodeExporter()
def deployAllProcessExporters(self):
for index in range(len(self.dnodes)):
if index == 0:
self.dnodes[index].deployProcessExporter(['taosd', 'taosadapter'])
else:
if config['taosd_cluster'] and config['taosadapter_separate_deploy']:
self.dnodes[index].deployProcessExporter(['taosd', 'taosadapter'])
elif config['taosd_cluster'] and not config['taosadapter_separate_deploy']:
self.dnodes[index].deployProcessExporter(['taosd'])
elif not config['taosd_cluster'] and config['taosadapter_separate_deploy']:
self.dnodes[index].deployProcessExporter(['taosadapter'])
else:
pass
def downloadPrometheus(self):
logger.info(f'{self.monitor_ip}: downloading prometheus from {config["prometheus"]["prometheus_addr"]}')
tar_file_name = config["prometheus"]["prometheus_addr"].split("/")[-1]
if not bool(int(self.monitor_conn.exec_cmd(f'[ -e ~/{tar_file_name} ] && echo 1 || echo 0'))):
self.monitor_conn.exec_cmd(f'wget -P ~ {config["prometheus"]["prometheus_addr"]}')
def killPrometheus(self):
logger.info(f'{self.monitor_ip}: killing prometheus')
self.monitor_conn.exec_cmd("ps -ef | grep -w prometheus | grep -v grep | awk \'{print $2}\' | sudo xargs kill -9")
def uploadPrometheusYml(self):
logger.info('generating prometheus yml')
scrape_configs = [{'job_name': 'prometheus', 'static_configs': [{'targets': ['localhost:9090']}]}]
for index in range(len(self.dnodes)):
if not config['taosd_cluster'] and not config['taosadapter_separate_deploy']:
pass
else:
scrape_configs.append({'job_name': f'{self.dnodes[index].dnode_ip}_sys', 'static_configs': [{'targets': [f'{self.dnodes[index].dnode_ip}:9100'], 'labels': {'instance': f'{self.dnodes[index].dnode_ip}_sys'}}]})
scrape_configs.append({'job_name': f'{self.dnodes[index].dnode_ip}', 'static_configs': [{'targets': [f'{self.dnodes[index].dnode_ip}:9256'], 'labels': {'instance': f'{self.dnodes[index].dnode_ip}'}}]})
djson = {'global': {'scrape_interval': config["prometheus"]["scrape_interval"], 'evaluation_interval': config["prometheus"]["evaluation_interval"], 'scrape_timeout': config["prometheus"]["scrape_timeout"]}, 'alerting': {'alertmanagers': [{'static_configs': [{'targets': None}]}]}, 'rule_files': None, 'scrape_configs': scrape_configs}
dstr=json.dumps(djson)
dyml=yaml.load(dstr, Loader=yaml.FullLoader)
        with open('prometheus.yml', 'w') as stream:
            yaml.safe_dump(dyml, stream, default_flow_style=False)
self.monitor_conn.upload_file(self.home_dir, 'prometheus.yml')
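    # Example (illustrative) scrape entry emitted for a dnode at 192.168.1.100:
    #   - job_name: 192.168.1.100_sys
    #     static_configs:
    #     - targets: ['192.168.1.100:9100']
    #       labels: {instance: 192.168.1.100_sys}
    # plus a process-exporter twin on port 9256.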
def deployPrometheus(self):
logger.info(f'{self.monitor_ip}: deploying prometheus')
self.installDnodesPackage()
self.killPrometheus()
self.downloadPrometheus()
self.uploadPrometheusYml()
tar_file_name = config["prometheus"]["prometheus_addr"].split("/")[-1]
tar_file_dir = tar_file_name.replace(".tar.gz", "")
self.monitor_conn.exec_cmd(f'cd ~ && tar -xvf {tar_file_name} && mv ~/prometheus.yml ~/{tar_file_dir}')
self.monitor_conn.exec_cmd(f'screen -d -m ~/{tar_file_dir}/prometheus --config.file={self.home_dir}/{tar_file_dir}/prometheus.yml')
def installGrafana(self):
logger.info(f'{self.monitor_ip}: installing grafana')
if bool(int(self.monitor_conn.exec_cmd(f'cat /etc/os-release | grep ubuntu >> /dev/null && echo 1 || echo 0'))):
if not bool(int(self.monitor_conn.exec_cmd(f'sudo dpkg -s grafana >> /dev/null && echo 1 || echo 0'))):
self.monitor_conn.exec_cmd('sudo apt-get install -y apt-transport-https')
self.monitor_conn.exec_cmd('sudo apt-get install -y software-properties-common wget')
self.monitor_conn.exec_cmd('wget -q -O - https://packages.grafana.com/gpg.key | sudo apt-key add -')
self.monitor_conn.exec_cmd('echo "deb https://packages.grafana.com/oss/deb stable main" | sudo tee -a /etc/apt/sources.list.d/grafana.list')
                self.monitor_conn.exec_cmd('sudo apt-get update')
self.monitor_conn.exec_cmd('sudo apt-get -y install grafana')
elif bool(int(self.monitor_conn.exec_cmd(f'cat /etc/os-release | grep centos >> /dev/null && echo 1 || echo 0'))):
if not bool(int(self.monitor_conn.exec_cmd(f'sudo rpm -qa | grep grafana >> /dev/null && echo 1 || echo 0'))):
                self.monitor_conn.exec_cmd('sudo rm -rf /etc/yum.repos.d/grafana.repo')
                # write the repo file via `sh -c` so the redirect also runs as root
                self.monitor_conn.exec_cmd('sudo sh -c \'echo -e "[grafana]\\nname=grafana\\nbaseurl=https://packages.grafana.com/oss/rpm\\nrepo_gpgcheck=1\\nenabled=1\\ngpgcheck=1\\ngpgkey=https://packages.grafana.com/gpg.key\\nsslverify=1\\nsslcacert=/etc/pki/tls/certs/ca-bundle.crt" > /etc/yum.repos.d/grafana.repo\'')
                self.monitor_conn.exec_cmd('sudo yum install -y grafana')
else:
pass
def deployGrafana(self):
self.installGrafana()
self.monitor_conn.exec_cmd('systemctl daemon-reload')
self.monitor_conn.exec_cmd('systemctl start grafana-server')
self.monitor_conn.exec_cmd('systemctl enable grafana-server.service')
self.monitor_conn.exec_cmd('systemctl status grafana-server')
if __name__ == '__main__':
deploy = Dnodes()
deploy.deployNodes()
monitor = Monitor()
monitor.deployAllNodeExporters()
monitor.deployAllProcessExporters()
monitor.deployPrometheus()
monitor.deployGrafana()
# -*- coding: utf-8 -*-
from fabric import Connection
from config.env_init import *
class RemoteModule():
def __init__(self, ip, port, user, passwd):
self.ip = ip
self.port = port
self.user = user
self.passwd = passwd
def upload_file(self, remote_dir, upload_file):
"""
remote_dir: remote upload dir
upload_file: local file with path
"""
try:
logger.info(f'{self.ip}: uploading {upload_file} to {remote_dir}')
c = Connection(self.ip, user=self.user, port=self.port, connect_timeout=120, connect_kwargs={"password": self.passwd})
c.put(upload_file, remote_dir)
c.close()
except Exception as e:
logger.error(f"{upload_file} send failed----{e}, please check config/perf_test.yaml")
def download_file(self, remote_file_with_path, local_path):
"""
remote_file_with_path:: file with Absolute Path eg:/root/maple/bin/maple
local_path:: remote path eg:/root
"""
try:
c = Connection(self.ip, user=self.user, port=self.port, connect_timeout=120, connect_kwargs={"password": self.passwd})
c.get(remote_file_with_path, local_path)
c.close()
except Exception as e:
logger.error(f"download file {remote_file_with_path} failed:{e}");
def exec_cmd(self, cmd):
"""
cmd:: remote exec cmd
"""
try:
logger.info(f'{self.ip}: executing cmd: {cmd}')
c = Connection(self.ip, user=self.user, port=self.port, connect_timeout=120, connect_kwargs={"password": self.passwd})
result = c.run(cmd, pty=False, warn=True, hide=False)
c.close()
return result.stdout
except Exception as e:
logger.error(f"exec cmd {cmd} failed:{e}");
if __name__ == '__main__':
pass
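    # Usage sketch (host and credentials are illustrative):
    #   rm = RemoteModule('192.168.1.100', 22, 'root', 'password')
    #   print(rm.exec_cmd('hostname'))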