Commit 849abb4b, authored by: S shenglian zhou

Merge remote-tracking branch 'origin/develop' into fix/TD-12195

@@ -4,8 +4,10 @@ import jenkins.model.CauseOfInterruption
 node {
 }
-def skipbuild=0
-def win_stop=0
+def skipbuild = 0
+def win_stop = 0
+def scope = []
+def mod = [0,1,2,3,4]
 def abortPreviousBuilds() {
   def currentJobName = env.JOB_NAME
@@ -349,7 +351,7 @@ pipeline {
     }
     stages {
       stage('pre_build'){
-        agent{label 'master'}
+        agent{label 'catalina'}
         options { skipDefaultCheckout() }
         when {
           changeRequest()
@@ -358,44 +360,32 @@ pipeline {
           script{
             abort_previous()
             abortPreviousBuilds()
-          }
-          // sh'''
-          // rm -rf ${WORKSPACE}.tes
-          // cp -r ${WORKSPACE} ${WORKSPACE}.tes
-          // cd ${WORKSPACE}.tes
-          // git fetch
-          // '''
-          // script {
-          //   if (env.CHANGE_TARGET == 'master') {
-          //     sh '''
-          //     git checkout master
-          //     '''
-          //   }
-          //   else if(env.CHANGE_TARGET == '2.0'){
-          //     sh '''
-          //     git checkout 2.0
-          //     '''
-          //   }
-          //   else{
-          //     sh '''
-          //     git checkout develop
-          //     '''
-          //   }
-          // }
-          // sh'''
-          // git fetch origin +refs/pull/${CHANGE_ID}/merge
-          // git checkout -qf FETCH_HEAD
-          // '''
-          // script{
-          //   skipbuild='2'
-          //   skipbuild=sh(script: "git log -2 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip]' && echo 1 || echo 2", returnStdout:true)
-          //   println skipbuild
-          // }
-          // sh'''
-          // rm -rf ${WORKSPACE}.tes
-          // '''
-          // }
+            println env.CHANGE_BRANCH
+            if(env.CHANGE_FORK){
+              scope = ['connector','query','insert','other','tools','taosAdapter']
+            }
+            else{
+              sh'''
+              cd ${WKC}
+              git fetch
+              git checkout ${CHANGE_BRANCH}
+              git pull
+              '''
+              dir('/var/lib/jenkins/workspace/TDinternal/community'){
+                gitlog = sh(script: "git log -1 --pretty=%B ", returnStdout:true)
+                println gitlog
+                if (!(gitlog =~ /\((.*?)\)/)){
+                  autoCancelled = true
+                  error('Aborting the build.')
+                }
+                temp = (gitlog =~ /\((.*?)\)/)
+                temp = temp[0].remove(1)
+                scope = temp.split(",")
+                Collections.shuffle mod
+              }
+            }
+          }
         }
       }
      stage('Parallel test stage') {
@@ -408,239 +398,90 @@ pipeline {
           }
         }
         parallel {
-          stage('python_1_s1') {
-            agent{label " slave1 || slave11 "}
+          stage('python_1') {
+            agent{label " slave1 || slave6 || slave11 || slave16 "}
             steps {
               pre_test()
               timeout(time: 55, unit: 'MINUTES'){
-                sh '''
-                date
-                cd ${WKC}/tests
-                ./test-all.sh p1
-                date'''
+                script{
+                  scope.each {
+                    sh """
+                    date
+                    cd ${WKC}/tests
+                    ./test-CI.sh ${it} 5 ${mod[0]}
+                    date"""
+                  }
+                }
               }
             }
           }
-          stage('python_2_s5') {
-            agent{label " slave5 || slave15 "}
+          stage('python_2') {
+            agent{label " slave2 || slave7 || slave12 || slave17 "}
             steps {
               pre_test()
               timeout(time: 55, unit: 'MINUTES'){
-                sh '''
-                date
-                cd ${WKC}/tests
-                ./test-all.sh p2
-                date'''
+                script{
+                  scope.each {
+                    sh """
+                    date
+                    cd ${WKC}/tests
+                    ./test-CI.sh ${it} 5 ${mod[1]}
+                    date"""
+                  }
+                }
               }
             }
           }
-          stage('python_3_s6') {
-            agent{label " slave6 || slave16 "}
-            steps {
-              timeout(time: 55, unit: 'MINUTES'){
-                pre_test()
-                sh '''
-                date
-                cd ${WKC}/tests
-                ./test-all.sh p3
-                date'''
-              }
-            }
-          }
-          stage('test_b1_s2') {
-            agent{label " slave2 || slave12 "}
+          stage('python_3') {
+            agent{label " slave3 || slave8 || slave13 ||slave18 "}
             steps {
               timeout(time: 105, unit: 'MINUTES'){
                 pre_test()
-                sh '''
-                rm -rf /var/lib/taos/*
-                rm -rf /var/log/taos/*
-                nohup taosd >/dev/null &
-                sleep 10
-                '''
-                sh '''
-                cd ${WKC}/src/connector/python
-                export PYTHONPATH=$PWD/
-                export LD_LIBRARY_PATH=${WKC}/debug/build/lib
-                pip3 install pytest
-                pytest tests/
-                python3 examples/bind-multi.py
-                python3 examples/bind-row.py
-                python3 examples/demo.py
-                python3 examples/insert-lines.py
-                python3 examples/pep-249.py
-                python3 examples/query-async.py
-                python3 examples/query-objectively.py
-                python3 examples/subscribe-sync.py
-                python3 examples/subscribe-async.py
-                '''
-                sh '''
-                cd ${WKC}/src/connector/nodejs
-                npm install
-                npm run test
-                cd ${WKC}/tests/examples/nodejs
-                npm install td2.0-connector > /dev/null 2>&1
-                node nodejsChecker.js host=localhost
-                node test1970.js
-                cd ${WKC}/tests/connectorTest/nodejsTest/nanosupport
-                npm install td2.0-connector > /dev/null 2>&1
-                node nanosecondTest.js
-                '''
-                catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
-                  sh '''
-                  cd ${WKC}/src/connector/C#
-                  dotnet test
-                  dotnet run --project src/test/Cases/Cases.csproj
-                  cd ${WKC}/tests/examples/C#
-                  dotnet run --project C#checker/C#checker.csproj
-                  dotnet run --project TDengineTest/TDengineTest.csproj
-                  dotnet run --project schemaless/schemaless.csproj
-                  cd ${WKC}/tests/examples/C#/taosdemo
-                  dotnet build -c Release
-                  tree | true
-                  ./bin/Release/net5.0/taosdemo -c /etc/taos -y
-                  '''
-                }
-                sh '''
-                cd ${WKC}/tests/gotest
-                bash batchtest.sh
-                '''
-                sh '''
-                cd ${WKC}/tests
-                ./test-all.sh b1fq
-                date'''
+                script{
+                  scope.each {
+                    sh """
+                    date
+                    cd ${WKC}/tests
+                    ./test-CI.sh ${it} 5 ${mod[2]}
+                    date"""
+                  }
+                }
               }
             }
           }
-          stage('test_crash_gen_s3') {
-            agent{label " slave3 || slave13 "}
+          stage('python_4') {
+            agent{label " slave4 || slave9 || slave14 || slave19 "}
             steps {
-              pre_test()
-              timeout(time: 60, unit: 'MINUTES'){
-                sh '''
-                cd ${WKC}/tests/pytest
-                ./crash_gen.sh -a -p -t 4 -s 2000
-                '''
-              }
-              timeout(time: 60, unit: 'MINUTES'){
-                sh '''
-                cd ${WKC}/tests/pytest
-                rm -rf /var/lib/taos/*
-                rm -rf /var/log/taos/*
-                ./handle_crash_gen_val_log.sh
-                '''
-                sh '''
-                cd ${WKC}/tests/pytest
-                rm -rf /var/lib/taos/*
-                rm -rf /var/log/taos/*
-                ./handle_taosd_val_log.sh
-                '''
-              }
               timeout(time: 55, unit: 'MINUTES'){
-                sh '''
-                date
-                cd ${WKC}/tests
-                ./test-all.sh b2fq
-                date
-                '''
-              }
-            }
-          }
-          stage('test_valgrind_s4') {
-            agent{label " slave4 || slave14 "}
-            steps {
-              pre_test()
-              catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
-                sh '''
-                cd ${WKC}/tests/pytest
-                ./valgrind-test.sh 2>&1 > mem-error-out.log
-                ./handle_val_log.sh
-                '''
-              }
-              timeout(time: 55, unit: 'MINUTES'){
-                sh '''
-                date
-                cd ${WKC}/tests
-                ./test-all.sh b3fq
-                date'''
-                sh '''
-                date
-                cd ${WKC}/tests
-                ./test-all.sh full example
-                date'''
-              }
-            }
-          }
-          stage('test_b4_s7') {
-            agent{label " slave7 || slave17 "}
-            steps {
-              timeout(time: 105, unit: 'MINUTES'){
                 pre_test()
-                sh '''
-                date
-                cd ${WKC}/tests
-                ./test-all.sh b4fq
-                cd ${WKC}/tests
-                ./test-all.sh p4
-                '''
-                // cd ${WKC}/tests
-                // ./test-all.sh full jdbc
-                // cd ${WKC}/tests
-                // ./test-all.sh full unit
-              }
-            }
-          }
-          stage('test_b5_s8') {
-            agent{label " slave8 || slave18 "}
-            steps {
-              timeout(time: 55, unit: 'MINUTES'){
-                pre_test()
-                sh '''
-                date
-                cd ${WKC}/tests
-                ./test-all.sh b5fq
-                date'''
-              }
-            }
-          }
-          stage('test_b6_s9') {
-            agent{label " slave9 || slave19 "}
-            steps {
-              timeout(time: 55, unit: 'MINUTES'){
-                pre_test()
-                sh '''
-                cd ${WKC}/tests
-                ./test-all.sh develop-test
-                '''
-                sh '''
-                date
-                cd ${WKC}/tests
-                ./test-all.sh b6fq
-                date'''
+                script{
+                  scope.each {
+                    sh """
+                    date
+                    cd ${WKC}/tests
+                    ./test-CI.sh ${it} 5 ${mod[3]}
+                    date"""
+                  }
+                }
               }
             }
           }
-          stage('test_b7_s10') {
-            agent{label " slave10 || slave20 "}
+          stage('python_5') {
+            agent{label " slave5 || slave10 || slave15 || slave20 "}
             steps {
               timeout(time: 55, unit: 'MINUTES'){
                 pre_test()
-                sh '''
-                cd ${WKC}/tests
-                ./test-all.sh system-test
-                '''
-                sh '''
-                date
-                cd ${WKC}/tests
-                ./test-all.sh b7fq
-                date'''
+                script{
+                  scope.each {
+                    sh """
+                    date
+                    cd ${WKC}/tests
+                    ./test-CI.sh ${it} 5 ${mod[4]}
+                    date"""
+                  }
+                }
               }
             }
          }
@@ -813,3 +654,4 @@ pipeline {
   }
 }
}
@@ -210,7 +210,7 @@ The logical structure of TDengine's distributed architecture is shown below:
 ![Typical TDengine operation flow](../images/architecture/message.png)
 <center> Figure 2: Typical TDengine operation flow </center>
-1. The application initiates a request to insert data through JDBC, ODBC, or another API.
+1. The application initiates a request to insert data through JDBC or another API.
 2. taosc checks its cache for the table's meta data. If it is cached, taosc goes directly to step 4. If not, taosc sends a get meta-data request to the mnode.
 3. The mnode returns the table's meta-data to taosc. The meta-data contains the table's schema and the table's vgroup information (the vnode ID and the End Point of the dnode it resides on; if the replica count is N, there are N sets of End Points). If taosc gets no response from the mnode and multiple mnodes exist, taosc sends the request to the next mnode.
 4. taosc sends the insert request to the master vnode.
@@ -301,20 +301,6 @@ A master vnode follows the write flow below:
 Compared with a master vnode, a slave vnode has no forwarding step and no reply-confirmation step, so two steps fewer. Writing to memory and the WAL, however, is exactly the same.
-### Geographic disaster recovery and IDC migration
-As the master and slave flows above show, TDengine synchronizes data through asynchronous replication. This greatly improves write performance, and network latency has little effect on write speed. By configuring each physical node's IDC and rack number, a virtual node group can be made up of physical nodes from different IDCs and different racks, achieving geographic disaster recovery. TDengine therefore supports geographic disaster recovery natively, without additional tools.
-In addition, TDengine supports changing the replica count dynamically. Once the replica count increases, the newly added virtual nodes immediately enter the data synchronization flow, and after synchronization finishes they can serve requests. During synchronization, the master and the already-synchronized virtual nodes keep serving. With this feature, TDengine can migrate between IDC machine rooms without service interruption: add the new IDC's physical nodes to the existing cluster, wait for data synchronization to finish, then remove the old IDC's physical nodes from the cluster.
-However, asynchronous replication leaves a tiny time window in which written data can be lost. The concrete scenario:
-1. The master vnode finishes its 5 steps, confirms the write to the APP as successful, and then crashes.
-2. The slave vnode receives the write request but fails before step 2, writing the log.
-3. The slave vnode becomes the new master, and one record is lost.
-In theory, no asynchronous replication can guarantee 100% durability. But the window is tiny: the master and slave must both fail, and right after the write was confirmed to the application.
 ### Master election
 A vnode maintains a data version number (version); when in-memory data is persisted, the version number is persisted along with it. Each data update, whether collected time-series data or meta data, increments this version number by 1.
......
@@ -7,11 +7,21 @@ TDengine integrates quickly with [Grafana](https://www.grafana.com/), an open-source data visualization system
 ### Install Grafana
-TDengine currently supports Grafana 6.2 and above. Download the package for your operating system from the Grafana website and install it. Download address: https://grafana.com/grafana/download
+TDengine currently supports Grafana 7.0 and above. Download the package for your operating system from the Grafana website and install it. Download address: <https://grafana.com/grafana/download>
 ### Configure Grafana
-Download the TDengine Grafana plugin from <https://github.com/taosdata/grafanaplugin/releases/latest>.
+The TDengine Grafana plugin is hosted on GitHub and can be downloaded from <https://github.com/taosdata/grafanaplugin/releases/latest>; the current latest version is 3.1.3.
+Installing the plugin with the [`grafana-cli` command-line tool](https://grafana.com/docs/grafana/latest/administration/cli/) is recommended.
+```bash
+sudo -u grafana grafana-cli \
+  --pluginUrl https://github.com/taosdata/grafanaplugin/releases/download/v3.1.3/tdengine-datasource-3.1.3.zip \
+  plugins install tdengine-datasource
+```
+Alternatively, download the package locally and unpack it into the Grafana plugin directory.
 ```bash
 GF_VERSION=3.1.3
@@ -31,11 +41,18 @@ Grafana 7.3+ / 8.x verifies plugin signatures, so grafana.ini additionally needs
 allow_loading_unsigned_plugins = tdengine-datasource
 ```
+In a Docker environment, the following environment variables install and enable the TDengine plugin automatically:
+```bash
+GF_INSTALL_PLUGINS=https://github.com/taosdata/grafanaplugin/releases/download/v3.1.3/tdengine-datasource-3.1.3.zip;tdengine-datasource
+GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=tdengine-datasource
+```
 ### Use Grafana
 #### Configure data source
-Log in to the Grafana server directly at localhost:3000 (user/password: admin/admin), then add a data source via `Configuration -> Data Sources` on the left, as shown below:
+Log in to the Grafana server directly at <http://localhost:3000> (user/password: admin/admin), then add a data source via `Configuration -> Data Sources` on the left, as shown below:
 ![img](../images/connections/add_datasource1.jpg)
@@ -77,78 +94,3 @@ allow_loading_unsigned_plugins = tdengine-datasource
 In 2.3.3.0 and above, you can import the TDinsight Dashboard (Grafana Dashboard ID: [15167](https://grafana.com/grafana/dashboards/15167)) as a monitoring visualization tool for a TDengine cluster. For installation and usage instructions, see the [TDinsight user manual](https://www.taosdata.com/cn/documentation/tools/insight).
## <a class="anchor" id="matlab"></a>MATLAB
MATLAB 可以通过安装包内提供的 JDBC Driver 直接连接到 TDengine 获取数据到本地工作空间。
### MATLAB 的 JDBC 接口适配
MATLAB 的适配有下面几个步骤,下面以 Windows 10 上适配 MATLAB2021a 为例:
- 将 TDengine 客户端安装路径下的 `\TDengine\connector\jdbc的驱动程序taos-jdbcdriver-2.0.25-dist.jar` 拷贝到 `${matlab_root}\MATLAB\R2021a\java\jar\toolbox`
- 将 TDengine 安装包内的 `taos.lib` 文件拷贝至 `${matlab_root_dir}\MATLAB\R2021\lib\win64`
- 将新添加的驱动 jar 包加入 MATLAB 的 classpath。在 `${matlab_root_dir}\MATLAB\R2021a\toolbox\local\classpath.txt` 文件中添加下面一行:
```
$matlabroot/java/jar/toolbox/taos-jdbcdriver-2.0.25-dist.jar
```
-`${user_home}\AppData\Roaming\MathWorks\MATLAB\R2021a\` 下添加一个文件 `javalibrarypath.txt`,并在该文件中添加 taos.dll 的路径,比如您的 taos.dll 是在安装时拷贝到了 `C:\Windows\System32` 下,那么就应该在 `javalibrarypath.txt` 中添加如下一行:
```
C:\Windows\System32
```
### 在 MATLAB 中连接 TDengine 获取数据
在成功进行了上述配置后,打开 MATLAB。
- 创建一个连接:
```matlab
conn = database(‘test’, ‘root’, ‘taosdata’, ‘com.taosdata.jdbc.TSDBDriver’, ‘jdbc:TSDB://192.168.1.94:6030/’)
```
- 执行一次查询:
```matlab
sql0 = [‘select * from tb’]
data = select(conn, sql0);
```
- 插入一条记录:
```matlab
sql1 = [‘insert into tb values (now, 1)’]
exec(conn, sql1)
```
更多例子细节请参考安装包内 `examples\Matlab\TDengineDemo.m` 文件。
## <a class="anchor" id="r"></a>R
R语言支持通过JDBC接口来连接TDengine数据库。首先需要安装R语言的JDBC包。启动R语言环境,然后执行以下命令安装R语言的JDBC支持库:
```R
install.packages('RJDBC', repos='http://cran.us.r-project.org')
```
安装完成以后,通过执行`library('RJDBC')`命令加载 _RJDBC_ 包:
然后加载TDengine的JDBC驱动:
```R
drv<-JDBC("com.taosdata.jdbc.TSDBDriver","JDBCDriver-2.0.0-dist.jar", identifier.quote="\"")
```
如果执行成功,不会出现任何错误信息。之后通过以下命令尝试连接数据库:
```R
conn<-dbConnect(drv,"jdbc:TSDB://192.168.0.1:0/?user=root&password=taosdata","root","taosdata")
```
注意将上述命令中的IP地址替换成正确的IP地址。如果没有任务错误的信息,则连接数据库成功,否则需要根据错误提示调整连接的命令。TDengine支持以下的 _RJDBC_ 包中函数:
- dbWriteTable(conn, "test", iris, overwrite=FALSE, append=TRUE):将数据框iris写入表test中,overwrite必须设置为false,append必须设为TRUE,且数据框iris要与表test的结构一致。
- dbGetQuery(conn, "select count(*) from test"):查询语句。
- dbSendUpdate(conn, "use db"):执行任何非查询sql语句。例如dbSendUpdate(conn, "use db"), 写入数据dbSendUpdate(conn, "insert into t1 values(now, 99)")等。
- dbReadTable(conn, "test"):读取表test中数据。
- dbDisconnect(conn):关闭连接。
- dbRemoveTable(conn, "test"):删除表test。
TDengine客户端暂不支持如下函数:
- dbExistsTable(conn, "test"):是否存在表test。
- dbListTables(conn):显示连接中的所有表。
@@ -222,7 +222,11 @@ taosd -C
 | 104 | maxWildCardsLength | | **C** | bytes | Maximum allowed length of the wildcard string for the LIKE operator | 0-16384 | 100 | Added in 2.1.6.1. |
 | 105 | compressColData | | **S** | bytes | Threshold for column-compressing server-side query results in client/server messaging. | 0: compress all query results; >0: compress a message only if some column in the result exceeds this size; -1: no compression | -1 | Added in 2.3.0.0. |
 | 106 | tsdbMetaCompactRatio | | **C** | | Redundancy threshold in the tsdb meta file above which meta-file compaction is enabled | 0: disabled; [1-100]: redundancy ratio | 0 | |
 | 107 | rpcForceTcp | | **SC** | | Force TCP transport | 0: off; 1: on | 0 | Recommended on poor networks. Added in 2.0. |
+| 108 | maxNumOfDistinctRes | | **S** | | Maximum number of rows a distinct result may return | Default 100,000; maximum 100 million | 100,000 | Added in 2.3. |
+| 109 | clientMerge | | **C** | | Whether the client may deduplicate written data | 0: off; 1: on | 0 | Added in 2.3. |
+| 110 | httpDBNameMandatory | | **S** | | Whether the database name must appear in the URL | 0: off; 1: on | 0 | Added in 2.3. |
+| 111 | maxRegexStringLen | | **C** | | Maximum allowed length of a regular expression | Default 128; maximum 16384 | 128 | Added in 2.3. |
 **Note:** For ports, TDengine uses 13 consecutive TCP and UDP port numbers starting from serverPort; be sure to open them in the firewall. With the default configuration, that means opening the 13 ports from 6030 to 6042, both TCP and UDP. (For port details see [TDengine 2.0 port notes](https://www.taosdata.com/cn/documentation/faq#port))
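As a sketch, the four new parameters above could appear in taos.cfg as follows; the values shown are simply the documented defaults, not tuning recommendations:

```
# illustrative taos.cfg fragment (documented defaults)
maxNumOfDistinctRes   100000
clientMerge           0
httpDBNameMandatory   0
maxRegexStringLen     128
```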
@@ -615,35 +619,6 @@ The COMPACT command starts defragmentation for one or more specified VGroups; the system then
 Note that defragmentation consumes a large amount of disk I/O. While it runs it may degrade write and query performance on the node, and in extreme cases may even block writes briefly.
-<a class="anchor" id="tsz_compress"></a>
-## Lossy compression of floating-point numbers
-In IoT scenarios such as connected vehicles, huge volumes of floating-point data are collected and stored. Compressing such data more efficiently not only saves storage hardware but also improves system performance by reducing disk I/O volume.
-Starting with version 2.1.6.0, TDengine offers a new compression algorithm named TSZ which, whether configured as lossy or lossless, markedly improves the compression ratio of floating-point data. The feature currently ships as an optional module enabled by a specific build flag (i.e., it is not yet included in the regular installation packages).
-**Note that once enabled, the effect is global: it applies to all FLOAT and DOUBLE data in the system. Also, data written while lossy floating-point compression is enabled cannot be loaded by builds without the feature, which may cause the database service to exit with an error.**
-### Building a TDengine version that supports the TSZ compression algorithm
-The TSZ module lives in a separate repository, https://github.com/taosdata/TSZ. A TDengine build containing the module can be created as follows:
-1. TDengine plugins can currently only be fetched and built over SSH, so first set up an environment that can pull GitHub code via SSH.
-2. `git clone git@github.com:taosdata/TDengine -b your_branchname --recurse-submodules`; `--recurse-submodules` downloads the dependent module's source as well.
-3. `mkdir debug && cd debug` to enter a separate build directory.
-4. `cmake .. -DTSZ_ENABLED=true`, where `-DTSZ_ENABLED=true` adds TSZ plugin support to the build. If TSZ compilation is activated successfully, CMake also prints `build with TSZ enabled`.
-5. After a successful build, the TSZ floating-point compression plugin is compiled into TDengine, and the feature can be used by adjusting the configuration parameters in taos.cfg.
-### Enabling the TSZ compression algorithm via the configuration file
-To enable TSZ, besides enabling the TSZ module at build time, the following parameters must be set in the taos.cfg configuration file:
-* lossyColumns: the floating-point data types to compress lossily. String-valued: empty - lossy compression off; float - lossy compression for FLOAT only; double - for DOUBLE only; float|double - for both FLOAT and DOUBLE. The default is empty, i.e. lossy compression off.
-* fPrecision: the compression precision for float values; mantissa bits below this value are truncated. FLOAT-valued, minimum 0.0, maximum 100,000.0. Default 0.00000001 (1E-8).
-* dPrecision: the compression precision for double values; mantissa bits below this value are truncated. DOUBLE-valued, minimum 0.0, maximum 100,000.0. Default 0.0000000000000001 (1E-16).
-* maxRange: the maximum fluctuation range of the data. Usually needs no tuning; with data of particular characteristics it can be combined with range to achieve very high compression ratios. Default 500.
-* range: the rough fluctuation range of the data. Usually needs no tuning; with data of particular characteristics it can be combined with maxRange to achieve very high compression ratios. Default 100.
-**Note:** Any change to parameter values in the cfg file requires restarting taosd to take effect. These are global options and, once set, apply to the FLOAT and DOUBLE fields of all tables in all databases.
 ## <a class="anchor" id="directories"></a>Directory structure
 After TDengine is installed, the following directories and files are generated in the operating system by default:
@@ -900,10 +875,14 @@ The taosd server log flag debugflag defaults to 131; for debugging it often needs to be raised
 Once set to 135 or 143, log files grow quickly, astonishingly so under heavy write and query load. If all logs are stored together, key entries (configuration, errors, etc.) are easily flushed out. The server therefore stores important logs separately from the rest:
-- taosinfo holds important informational logs
-- taosdlog holds the other logs
+- taosinfo holds important informational logs: entries at INFO/ERROR/WARNING level. DEBUG and TRACE level entries are not recorded.
+- taosdlog holds the logs generated by the server: everything in taosinfo plus, depending on the configured log level, DEBUG (log level 135) and TRACE (log level 143).
+### Client logs
+Each independently running client (one process) generates its own client log, named taoslog plus a sequence number. The debugflag defaults to 131 and often needs to be raised to 135 or 143 for debugging.
+- taoslog holds the logs generated by the client (driver): INFO/ERROR/WARNING level entries by default plus, depending on the configured log level, DEBUG (log level 135) and TRACE (log level 143).
-The maximum length of the taosinfo log file is configured with numOfLogLines; a taosd instance keeps at most two files.
+The maximum log file length is configured with numOfLogLines; a taosd instance keeps at most two files.
 taosd server logs are flushed to disk asynchronously, which avoids heavy disk-write pressure degrading performance. The downside is that, in extreme cases, a few log lines may be lost.
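For illustration, a taos.cfg fragment that raises the log level for debugging and bounds the log files, using the debugFlag and numOfLogLines parameters described above (values are examples only):

```
# example only: verbose DEBUG logging and a 10-million-line cap per log file
debugFlag      135
numOfLogLines  10000000
```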
@@ -83,7 +83,7 @@ TDengine provides three UDF source-code examples:
 gcc -g -O0 -fPIC -shared add_one.c -o add_one.so
 ```
-This prepares the dynamic library add_one.so for creating the UDF later in this document.
+This prepares the dynamic library add_one.so for creating the UDF later in this document. For reliable operation, GCC 7.5 or above is recommended as the compiler.
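As a sketch of the next step, the compiled library can then be registered and called through SQL; the library path and the table and column names below are placeholders, and the full CREATE FUNCTION syntax is covered in the section that follows:

```mysql
-- illustrative: register the compiled library as a scalar UDF (path is an assumption)
CREATE FUNCTION add_one AS "/home/taos/udf/add_one.so" OUTPUTTYPE INT;
-- call it like a built-in function
SELECT add_one(f1) FROM t1;
```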
 ## Managing and using UDFs in the system
......
@@ -48,7 +48,7 @@ TDengine's default timestamp precision is milliseconds, but it can be changed by passing an option at CREATE DATABASE time
 | 3 | BIGINT | 8 | Long integer, range [-2^63+1, 2^63-1]; -2^63 is reserved for NULL |
 | 4 | FLOAT | 4 | Float, 6-7 significant digits, range [-3.4E38, 3.4E38] |
 | 5 | DOUBLE | 8 | Double-precision float, 15-16 significant digits, range [-1.7E308, 1.7E308] |
-| 6 | BINARY | user-defined | Single-byte string; recommended only for visible ASCII characters, while multi-byte characters such as Chinese require nchar. In theory it can be up to 16374 bytes long, but since a data row is at most 16K bytes, the practical limit is usually below the theoretical one. BINARY accepts string input only, and strings must be enclosed in single quotes. A size must be specified: binary(20) defines a string of at most 20 single-byte characters, each occupying 1 byte, 20 bytes of fixed storage in total; a string longer than 20 bytes raises an error. A single quote inside the string is written as the escape character backslash plus single quote, i.e. `\'`. |
+| 6 | BINARY | user-defined | Single-byte string; recommended only for visible ASCII characters, while multi-byte characters such as Chinese require nchar. In theory it can be up to 16374 bytes long. BINARY accepts string input only, and strings must be enclosed in single quotes. A size must be specified: binary(20) defines a string of at most 20 single-byte characters, each occupying 1 byte, 20 bytes of fixed storage in total; a string longer than 20 bytes raises an error. A single quote inside the string is written as the escape character backslash plus single quote, i.e. `\'`. |
 | 7 | SMALLINT | 2 | Short integer, range [-32767, 32767]; -32768 is reserved for NULL |
 | 8 | TINYINT | 1 | Single-byte integer, range [-127, 127]; -128 is reserved for NULL |
 | 9 | BOOL | 1 | Boolean, {true, false} |
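A small sketch of the BINARY sizing and quote-escaping rules described above (table and column names are placeholders):

```mysql
-- binary(20) reserves 20 bytes per value; longer strings are rejected
CREATE TABLE quotes (ts TIMESTAMP, msg BINARY(20));
-- a single quote inside the string is escaped as \'
INSERT INTO quotes VALUES (NOW, 'it\'s ok');
```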
@@ -683,6 +683,48 @@ taos> SELECT SERVER_STATUS() AS status;
 Query OK, 1 row(s) in set (0.000081s)
 ```
+Usage notes for the function _block_dist()
+<br/>Syntax
+SELECT _block_dist() FROM { tb_name | stb_name }
+Description: obtains the data-block distribution of the specified (super) table.
+Return type: string.
+Applicable data types: takes no parameters.
+Nested-query support: sub-queries and nested queries are not supported.
+Notes:
+Returns the data-block distribution of the table or super table named in the FROM clause. Query conditions are not supported.
+The result is a histogram of the number of rows contained in the data blocks of the table or super table.
+The returned result looks like:
+```
+summary:
+5th=[392], 10th=[392], 20th=[392], 30th=[392], 40th=[792], 50th=[792] 60th=[792], 70th=[792], 80th=[792], 90th=[792], 95th=[792], 99th=[792] Min=[392(Rows)] Max=[800(Rows)] Avg=[666(Rows)] Stddev=[2.17] Rows=[2000], Blocks=[3], Size=[5.440(Kb)] Comp=[0.23] RowsInMem=[0] SeekHeaderTime=[1(us)]
+```
+The fields above mean:
+<br/>1. A histogram of the row counts of the queried (super) table's on-file data blocks: the values at 5%, 10%, 20%, 30%, 40%, 50%, 60%, 70%, 80%, 90%, 95%, and 99%.
+<br/>2. The row count of the data block containing the fewest rows: the Min field, 392 rows.
+<br/>3. The row count of the data block containing the most rows: the Max field, 800 rows.
+<br/>4. The arithmetic mean of the row counts of all data blocks: 666 rows (the Avg field).
+<br/>5. The standard deviation of the row-count distribution across all data blocks: 2.17 (Stddev).
+<br/>6. The total number of rows in the data blocks: 2000 rows (Rows).
+<br/>7. The total number of data blocks: 3 (Blocks).
+<br/>8. The disk space occupied by the data blocks: 5.44 Kb (Size).
+<br/>9. The compression ratio, i.e. the compressed block size divided by the original size: 23% (Comp), meaning the compressed data is 23% of the original size.
+<br/>10. The number of rows currently in memory: 0, meaning no data is cached in memory.
+<br/>11. The time spent reading the header file while gathering block information: 1 microsecond (SeekHeaderTime).
+Supported versions: the ability to specify the algorithm starts with 2.1.0.x; versions before 2.1.0.0 do not support specifying the algorithm.
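For example, a query of this form produces the summary shown above (the table name meters is a placeholder):

```mysql
SELECT _block_dist() FROM meters;
```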
 #### Special keywords in TAOS SQL
 > TBNAME: in a super-table query this can be treated as a special tag holding the name of the sub-table a row comes from<br>
@@ -799,6 +841,7 @@ WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;
 3. Arithmetic on aggregates across the tables in a JOIN is not yet supported.
 4. GROUP BY on only some of the joined tables is not supported.
 5. Filter conditions on different tables in a JOIN query cannot be combined with OR.
+6. A JOIN condition may not use an ordinary column; only tags and the primary timestamp column (the first column) can be joined on, as the example below shows.
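A minimal sketch of a legal join under these rules, pairing rows on the primary timestamp column only (table and column names are placeholders):

```mysql
SELECT t1.ts, t1.current, t2.voltage
FROM d1001 t1, d1002 t2
WHERE t1.ts = t2.ts;
```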
<a class="anchor" id="nested"></a> <a class="anchor" id="nested"></a>
### 嵌套查询 ### 嵌套查询
...@@ -1529,7 +1572,56 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 ...@@ -1529,7 +1572,56 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
## <a class="anchor" id="aggregation"></a>按窗口切分聚合 ## <a class="anchor" id="aggregation"></a>按窗口切分聚合
TDengine 支持按时间段等窗口切分方式进行聚合结果查询,比如温度传感器每秒采集一次数据,但需查询每隔 10 分钟的温度平均值。这类聚合适合于降维(down sample)操作,语法如下: TDengine 支持按时间段窗口切分方式进行聚合结果查询,比如温度传感器每秒采集一次数据,但需查询每隔 10 分钟的温度平均值。这种场景下可以使用窗口子句来获得需要的查询结果。
窗口子句用于针对查询的数据集合进行按照窗口切分成为查询子集并进行聚合,窗口包含时间窗口(time window)、状态窗口(status window)、会话窗口(session window)三种窗口。其中时间窗口又可划分为滑动时间窗口和翻转时间窗口。
时间窗口
INTERVAL子句用于产生相等时间周期的窗口,SLIDING用以指定窗口向前滑动的时间。每次执行的查询是一个时间窗口,时间窗口随着时间流动向前滑动。在定义连续查询的时候需要指定时间窗口(time window )大小和每次前向增量时间(forward sliding times)。如图,[t0s, t0e] ,[t1s , t1e], [t2s, t2e] 是分别是执行三次连续查询的时间窗口范围,窗口的前向滑动的时间范围sliding time标识 。查询过滤、聚合等操作按照每个时间窗口为独立的单位执行。当SLIDING与INTERVAL相等的时候,滑动窗口即为翻转窗口。
![时间窗口示意图](../images/sql/timewindow-1.png)
INTERVAL和SLIDING子句需要配合聚合和选择函数来使用。以下SQL语句非法:
```mysql
SELECT * FROM temp_table INTERVAL(1S)
```
SLIDING的向前滑动的时间不能超过一个窗口的时间范围。以下语句非法:
```mysql
SELECT COUNT(*) FROM temp_table INTERVAL(1D) SLIDING(2D)
```
当 SLIDING 与 INTERVAL 取值相等的时候,滑动窗口即为翻转窗口。
* 聚合时间段的窗口宽度由关键词 INTERVAL 指定,最短时间间隔 10 毫秒(10a);并且支持偏移 offset(偏移必须小于间隔),也即时间窗口划分与“UTC 时刻 0”相比的偏移量。SLIDING 语句用于指定聚合时间段的前向增量,也即每次窗口向前滑动的时长。
* 从 2.1.5.0 版本开始,INTERVAL 语句允许的最短时间间隔调整为 1 微秒(1u),当然如果所查询的 DATABASE 的时间精度设置为毫秒级,那么允许的最短时间间隔为 1 毫秒(1a)。
* **注意**:用到 INTERVAL 语句时,除非极特殊的情况,都要求把客户端和服务端的 taos.cfg 配置文件中的 timezone 参数配置为相同的取值,以避免时间处理函数频繁进行跨时区转换而导致的严重性能影响。
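For contrast with the two invalid statements above, a legal interval query pairs INTERVAL and SLIDING with an aggregation function; this sketch assumes temp_table has a numeric column temperature:

```mysql
-- 10-minute average temperature, recomputed every 5 minutes
SELECT AVG(temperature) FROM temp_table INTERVAL(10m) SLIDING(5m);
```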
+Status window
+An integer (boolean) or string marks the status of the device when a record was produced. Records with the same status value belong to the same status window, and the window closes when the value changes. As illustrated, the status windows determined by the status value are [2019-04-28 14:22:07, 2019-04-28 14:22:10] and [2019-04-28 14:22:11, 2019-04-28 14:22:12]. (Status windows are not yet supported on super tables.)
+![Status-window illustration](../images/sql/timewindow-2.png)
+STATE_WINDOW names the column that determines the status-window split. For example:
+```mysql
+SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status)
+```
+Session window
+A session window decides membership by the value of the records' primary timestamp. As illustrated, if the maximum gap between consecutive timestamps is set to 12 seconds, the following 6 records form 2 session windows: [2019-04-28 14:22:10, 2019-04-28 14:22:30] and [2019-04-28 14:23:10, 2019-04-28 14:23:30], because the gap between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, exceeding the allowed gap (12 seconds).
+![Session-window illustration](../images/sql/timewindow-3.png)
+Results within tol_value of each other belong to the same window; when two consecutive records are more than tol_val apart, the next window opens automatically. (Session windows are not yet supported on super tables.)
+```mysql
+SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION_WINDOW(ts, tol_val)
+```
+The general syntax of such queries is:
 ```mysql
 SELECT function_list FROM tb_name
@@ -1547,12 +1639,8 @@ SELECT function_list FROM stb_name
 ```
 - In an aggregation query, function_list may contain aggregation and selection functions that each produce a single result (e.g. COUNT, AVG, SUM, STDDEV, LEASTSQUARES, PERCENTILE, MIN, MAX, FIRST, LAST), but not functions with multi-row output (e.g. TOP, BOTTOM, DIFF, or arithmetic).
-- Filtering, aggregation, and other operations execute independently for each window. Aggregation queries currently support three window splits:
-  1. Time window: the window width of an aggregation interval is given by the keyword INTERVAL, with a minimum of 10 milliseconds (10a); an offset is supported (the offset must be smaller than the interval), shifting the window boundaries relative to "UTC time 0". The SLIDING clause gives the forward increment, i.e. how far each window slides forward. When SLIDING equals INTERVAL, the sliding window becomes a tumbling window.
-   * From version 2.1.5.0, the minimum INTERVAL is 1 microsecond (1u); if the queried DATABASE uses millisecond precision, the minimum is 1 millisecond (1a).
-   * **Note:** when using INTERVAL, except in very special cases, configure the timezone parameter in taos.cfg identically on client and server, to avoid the severe performance impact of frequent cross-timezone conversion in the time-handling functions.
-  2. Status window: an integer or boolean marks the device status when a record was produced; records with the same status value belong to the same status window, which closes when the value changes. The column holding the status is passed as the STATE_WINDOW argument. (Status windows are not yet supported on super tables.)
-  3. Session window: the timestamp column is given by the ts_col argument of SESSION; a session window decides membership by the timestamp gap between adjacent records. If the gap is within tol_val, the records belong to the same window; if it exceeds tol_val, the next window opens automatically. (Session windows are not yet supported on super tables.)
 - The WHERE clause may give the query's start and end times and other filter conditions.
 - The FILL clause specifies how to fill a window interval with missing data. Fill modes include:
   1. No fill: NONE (the default).
@@ -1590,7 +1678,7 @@ SELECT AVG(current), MAX(current), APERCENTILE(current, 50) FROM meters
 - The maximum table-name length is 192, and a data row is at most 16k characters; from version 2.1.7.0, at most 48k characters (note: each BINARY/NCHAR column in a row additionally occupies 2 bytes of storage).
 - The maximum column-name length is 64; at most 1024 columns are allowed and at least 2 are required, the first being the timestamp. (From 2.1.7.0, at most 4096 columns are allowed.)
 - The maximum tag-name length is 64; at most 128 tags are allowed and at least 1, and the total length of a table's tag values may not exceed 16k characters.
-- The maximum SQL statement length is 1048576 characters; it can be changed with the system configuration parameter maxSQLLength, range 65480 ~ 1048576.
+- The maximum SQL statement length is 1048576 characters; it can be changed with the client configuration parameter maxSQLLength, range 65480 ~ 1048576.
 - A SELECT result may return at most 1024 columns (function calls in the statement may also occupy column slots); when over the limit, explicitly select fewer columns to avoid an execution error. (From 2.1.7.0, at most 4096 columns are allowed.)
 - The number of databases, super tables, and tables is not limited by the system, only by system resources.
......
@@ -33,8 +33,8 @@ IT operations monitoring data is usually time-sensitive, for example
 ### Download the TDengine plugin to the grafana plugin directory
 ```bash
-1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.1/tdengine-datasource-3.1.1.zip
-2. sudo unzip tdengine-datasource-3.1.1.zip -d /var/lib/grafana/plugins/
+1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.3/tdengine-datasource-3.1.3.zip
+2. sudo unzip tdengine-datasource-3.1.3.zip -d /var/lib/grafana/plugins/
 3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
 4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
 5. sudo systemctl restart grafana-server.service
......
@@ -32,8 +32,8 @@ IT operations monitoring data is usually time-sensitive, for example
 ### Copy the TDengine plugin to the grafana plugin directory
 ```bash
-1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.1/tdengine-datasource-3.1.1.zip
-2. sudo unzip tdengine-datasource-3.1.1.zip -d /var/lib/grafana/plugins/
+1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.3/tdengine-datasource-3.1.3.zip
+2. sudo unzip tdengine-datasource-3.1.3.zip -d /var/lib/grafana/plugins/
 3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
 4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
 5. sudo systemctl restart grafana-server.service
......
@@ -6,25 +6,47 @@ TDengine can be quickly integrated with [Grafana](https://www.grafana.com/), an
 ### Install Grafana
-TDengine currently supports Grafana 6.2 and above. You can download and install the package from the Grafana website according to the current operating system. The download address is as follows:
-https://grafana.com/grafana/download.
+TDengine currently supports Grafana 7.0 and above. You can download and install the package from the Grafana website according to the current operating system. The download address is as follows: <https://grafana.com/grafana/download>.
 ### Configure Grafana
-Download the grafana plugin from <https://github.com/taosdata/grafanaplugin/releases/latest>.
+The TDengine data source plugin for Grafana is hosted on GitHub; refer to the latest GitHub release page <https://github.com/taosdata/grafanaplugin/releases/latest> to download the latest plugin package. The current version is 3.1.3.
+It is recommended to use the [`grafana-cli` command-line tool](https://grafana.com/docs/grafana/latest/administration/cli/) to install the plugin.
+```bash
+sudo -u grafana grafana-cli \
+  --pluginUrl https://github.com/taosdata/grafanaplugin/releases/download/v3.1.3/tdengine-datasource-3.1.3.zip \
+  plugins install tdengine-datasource
+```
+Users can also download the plugin package manually and install it into the Grafana plugins directory.
 ```bash
 GF_VERSION=3.1.3
 wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
 ```
-Taking CentOS 7.2 as an example, just copy the grafanaplugin directory to the /var/lib/grafana/plugins directory and restart Grafana.
+Taking CentOS 7.2 as an example, just unpack the package into the /var/lib/grafana/plugins directory and restart Grafana.
 ```bash
 sudo unzip tdengine-datasource-$GF_VERSION.zip -d /var/lib/grafana/plugins/
 ```
+Grafana 7.3+ and 8.x check plugin signatures for security, so additional configuration is needed in the `grafana.ini` file to allow unsigned plugins such as the TDengine data source.
+```ini
+[plugins]
+allow_loading_unsigned_plugins = tdengine-datasource
+```
+In Docker/Compose/Kubernetes, setting these two environment variables installs and enables the plugin automatically.
+```bash
+GF_INSTALL_PLUGINS=https://github.com/taosdata/grafanaplugin/releases/download/v3.1.3/tdengine-datasource-3.1.3.zip;tdengine-datasource
+GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=tdengine-datasource
+```
 ### Use Grafana
 #### Configure data source
......
@@ -33,8 +33,8 @@ Please download TDengine 2.3.0.0 or a later version from TAOS Data's [official
 ### Download the TDengine plugin to Grafana's plugin directory
 ```bash
-1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.1/tdengine-datasource-3.1.1.zip
-2. sudo unzip tdengine-datasource-3.1.1.zip -d /var/lib/grafana/plugins/
+1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.3/tdengine-datasource-3.1.3.zip
+2. sudo unzip tdengine-datasource-3.1.3.zip -d /var/lib/grafana/plugins/
 3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
 4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
 5. sudo systemctl restart grafana-server.service
......
@@ -32,8 +32,8 @@ Please download TDengine 2.3.0.0 or a later version from TAOS Data's [official
 ### Download the TDengine plugin to Grafana's plugin directory
 ```bash
-1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.1/tdengine-datasource-3.1.1.zip
-2. sudo unzip tdengine-datasource-3.1.1.zip -d /var/lib/grafana/plugins/
+1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.3/tdengine-datasource-3.1.3.zip
+2. sudo unzip tdengine-datasource-3.1.3.zip -d /var/lib/grafana/plugins/
 3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
 4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
 5. sudo systemctl restart grafana-server.service
......
@@ -43,9 +43,11 @@ if [ "$osType" != "Darwin" ]; then
   if [ "$pagMode" == "lite" ]; then
     #strip ${build_dir}/bin/taosd
     strip ${build_dir}/bin/taos
-    bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh"
+    bin_files="${build_dir}/bin/taos \
+        ${script_dir}/remove_client.sh"
   else
-    bin_files="${script_dir}/remove_client.sh \
+    bin_files="${build_dir}/bin/taos \
+        ${script_dir}/remove_client.sh \
         ${script_dir}/set_core.sh \
         ${script_dir}/get_client.sh"
     #${script_dir}/get_client.sh ${script_dir}/taosd-dump-cfg.gdb"
......
@@ -128,12 +128,13 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
   // type length
   int32_t bytes = pSchema[i].bytes;
   pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 2);
-  if (pSchema[i].type == TSDB_DATA_TYPE_BINARY || pSchema[i].type == TSDB_DATA_TYPE_NCHAR) {
+  if (pSchema[i].type == TSDB_DATA_TYPE_BINARY){
     bytes -= VARSTR_HEADER_SIZE;
   }
-  if (pSchema[i].type == TSDB_DATA_TYPE_NCHAR) {
+  else if(pSchema[i].type == TSDB_DATA_TYPE_NCHAR || pSchema[i].type == TSDB_DATA_TYPE_JSON) {
+    bytes -= VARSTR_HEADER_SIZE;
     bytes = bytes / TSDB_NCHAR_SIZE;
   }
   *(int32_t *)(pRes->data + tscFieldInfoGetOffset(pQueryInfo, 2) * totalNumOfRows + pField->bytes * i) = bytes;
......
@@ -4519,13 +4519,16 @@ static int32_t validateSQLExprItemSQLFunc(SSqlCmd* pCmd, tSqlExpr* pExpr,
   if (TSDB_FUNC_IS_SCALAR(functionId)) {
     code = validateSQLExprItem(pCmd, pParamElem->pNode, pQueryInfo, pList, childrenTypes + i, uid, childrenHeight+i);
     if (code != TSDB_CODE_SUCCESS) {
-      free(childrenTypes);
+      tfree(childrenTypes);
+      tfree(childrenHeight);
       return code;
     }
   }
   if (!TSDB_FUNC_IS_SCALAR(functionId) &&
       (pParamElem->pNode->type == SQL_NODE_EXPR || pParamElem->pNode->type == SQL_NODE_SQLFUNCTION)) {
+    tfree(childrenTypes);
+    tfree(childrenHeight);
     return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
   }
@@ -4547,6 +4550,8 @@ static int32_t validateSQLExprItemSQLFunc(SSqlCmd* pCmd, tSqlExpr* pExpr,
   *height = maxChildrenHeight + 1;
   if (anyChildAgg && anyChildScalar) {
+    tfree(childrenTypes);
+    tfree(childrenHeight);
     return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
   }
   if (anyChildAgg) {
@@ -4558,7 +4563,8 @@ static int32_t validateSQLExprItemSQLFunc(SSqlCmd* pCmd, tSqlExpr* pExpr,
       *type = SQLEXPR_TYPE_AGG;
     }
   }
-  free(childrenTypes);
+  tfree(childrenTypes);
+  tfree(childrenHeight);
   //end if param list is not null
 } else {
   if (TSDB_FUNC_IS_SCALAR(functionId)) {
@@ -6869,7 +6875,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
     tscError("json type error, should be string");
     return invalidOperationMsg(pMsg, msg25);
   }
-  if (pItem->pVar.nType > TSDB_MAX_JSON_TAGS_LEN / TSDB_NCHAR_SIZE) {
+  if (pItem->pVar.nLen > TSDB_MAX_JSON_TAGS_LEN / TSDB_NCHAR_SIZE) {
     tscError("json tag too long");
     return invalidOperationMsg(pMsg, msg14);
   }
......
@@ -774,11 +774,12 @@ static void setResRawPtrImpl(SSqlRes* pRes, SInternalField* pInfo, int32_t i, bo
     memcpy(dst, p, varDataTLen(p));
   } else if (varDataLen(p) > 0) {
     int32_t length = taosUcs4ToMbs(varDataVal(p), varDataLen(p), varDataVal(dst));
-    varDataSetLen(dst, length);
-    if (length == 0) {
+    if (length <= 0) {
       tscError("charset:%s to %s. val:%s convert failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, (char*)p);
     }
+    if (length >= 0){
+      varDataSetLen(dst, length);
+    }
   } else {
     varDataSetLen(dst, 0);
   }
@@ -809,18 +810,23 @@ static void setResRawPtrImpl(SSqlRes* pRes, SInternalField* pInfo, int32_t i, bo
     varDataSetLen(dst, strlen(varDataVal(dst)));
   }else if (type == TSDB_DATA_TYPE_JSON) {
     int32_t length = taosUcs4ToMbs(varDataVal(realData), varDataLen(realData), varDataVal(dst));
-    varDataSetLen(dst, length);
-    if (length == 0) {
+    if (length <= 0) {
       tscError("charset:%s to %s. val:%s convert failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, (char*)p);
     }
+    if (length >= 0){
+      varDataSetLen(dst, length);
+    }
   }else if (type == TSDB_DATA_TYPE_NCHAR) { // value -> "value"
     *(char*)varDataVal(dst) = '\"';
     int32_t length = taosUcs4ToMbs(varDataVal(realData), varDataLen(realData), POINTER_SHIFT(varDataVal(dst), CHAR_BYTES));
-    *(char*)(POINTER_SHIFT(varDataVal(dst), length + CHAR_BYTES)) = '\"';
-    varDataSetLen(dst, length + CHAR_BYTES*2);
-    if (length == 0) {
+    if (length <= 0) {
       tscError("charset:%s to %s. val:%s convert failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, (char*)p);
     }
+    if (length >= 0){
+      varDataSetLen(dst, length + CHAR_BYTES*2);
+      *(char*)(POINTER_SHIFT(varDataVal(dst), length + CHAR_BYTES)) = '\"';
+    }
   }else if (type == TSDB_DATA_TYPE_DOUBLE) {
     double jsonVd = *(double*)(realData);
     sprintf(varDataVal(dst), "%.9lf", jsonVd);
@@ -5515,10 +5521,10 @@ int parseJsontoTagData(char* json, SKVRowBuilder* kvRowBuilder, char* errMsg, in
   char *tagVal = calloc(strlen(jsonValue) * TSDB_NCHAR_SIZE + TSDB_NCHAR_SIZE, 1);
   *tagVal = jsonType2DbType(0, item->type); // type
   char* tagData = POINTER_SHIFT(tagVal,CHAR_BYTES);
-  if (!taosMbsToUcs4(jsonValue, strlen(jsonValue), varDataVal(tagData),
+  if (strlen(jsonValue) > 0 && !taosMbsToUcs4(jsonValue, strlen(jsonValue), varDataVal(tagData),
                      (int32_t)(strlen(jsonValue) * TSDB_NCHAR_SIZE), &outLen)) {
-    tscError("json string error:%s|%s", strerror(errno), jsonValue);
-    retCode = tscSQLSyntaxErrMsg(errMsg, "serizelize json error", NULL);
+    tscError("charset:%s to %s. val:%s, errno:%s, convert failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, jsonValue, strerror(errno));
+    retCode = tscSQLSyntaxErrMsg(errMsg, "charset convert json error", NULL);
     free(tagVal);
     goto end;
   }
......
@@ -27,6 +27,7 @@
 #include "tskiplist.h"
 #include "texpr.h"
 #include "tarithoperator.h"
+#include "tulog.h"
 static int32_t exprValidateMathNode(tExprNode *pExpr);
 static int32_t exprValidateStringConcatNode(tExprNode *pExpr);
@@ -1274,6 +1275,11 @@ void castConvert(int16_t inputType, int16_t inputBytes, char *input, int16_t Out
   } else if (inputType == TSDB_DATA_TYPE_NCHAR) {
     char *newColData = calloc(1, outputBytes * TSDB_NCHAR_SIZE + 1);
     int len = taosUcs4ToMbs(varDataVal(input), varDataLen(input), newColData);
+    if (len < 0){
+      uError("castConvert taosUcs4ToMbs error 1");
+      tfree(newColData);
+      return;
+    }
     newColData[len] = 0;
     *(int64_t *)output = strtoll(newColData, NULL, 10);
     tfree(newColData);
@@ -1291,6 +1297,11 @@ void castConvert(int16_t inputType, int16_t inputBytes, char *input, int16_t Out
   } else if (inputType == TSDB_DATA_TYPE_NCHAR) {
     char *newColData = calloc(1, outputBytes * TSDB_NCHAR_SIZE + 1);
     int len = taosUcs4ToMbs(varDataVal(input), varDataLen(input), newColData);
+    if (len < 0){
+      uError("castConvert taosUcs4ToMbs error 2");
+      tfree(newColData);
+      return;
+    }
     newColData[len] = 0;
     *(int64_t *)output = strtoull(newColData, NULL, 10);
     tfree(newColData);
@@ -1332,11 +1343,19 @@ void castConvert(int16_t inputType, int16_t inputBytes, char *input, int16_t Out
   if (inputType == TSDB_DATA_TYPE_BOOL) {
     char tmp[8] = {0};
     int32_t len = sprintf(tmp, "%.*s", ncharSize, *(int8_t*)input ? "true" : "false");
-    taosMbsToUcs4(tmp, len, varDataVal(output), outputBytes - VARSTR_HEADER_SIZE, &len);
+    bool ret = taosMbsToUcs4(tmp, len, varDataVal(output), outputBytes - VARSTR_HEADER_SIZE, &len);
+    if(!ret) {
+      uError("castConvert1 taosMbsToUcs4 error");
+      return;
+    }
     varDataSetLen(output, len);
   } else if (inputType == TSDB_DATA_TYPE_BINARY) {
     int32_t len = ncharSize > varDataLen(input) ? varDataLen(input) : ncharSize;
-    taosMbsToUcs4(input + VARSTR_HEADER_SIZE, len, varDataVal(output), outputBytes - VARSTR_HEADER_SIZE, &len);
+    bool ret = taosMbsToUcs4(input + VARSTR_HEADER_SIZE, len, varDataVal(output), outputBytes - VARSTR_HEADER_SIZE, &len);
+    if(!ret) {
+      uError("castConvert2 taosMbsToUcs4 error");
+      return;
+    }
     varDataSetLen(output, len);
   } else if (inputType == TSDB_DATA_TYPE_TIMESTAMP) {
     assert(0);
@@ -1348,7 +1367,11 @@ void castConvert(int16_t inputType, int16_t inputBytes, char *input, int16_t Out
     char tmp[400] = {0};
     NUM_TO_STRING(inputType, input, sizeof(tmp), tmp);
     int32_t len = (int32_t)(ncharSize > strlen(tmp) ? strlen(tmp) : ncharSize);
-    taosMbsToUcs4(tmp, len, varDataVal(output), outputBytes - VARSTR_HEADER_SIZE, &len);
+    bool ret = taosMbsToUcs4(tmp, len, varDataVal(output), outputBytes - VARSTR_HEADER_SIZE, &len);
+    if(!ret) {
+      uError("castConvert3 taosMbsToUcs4 error");
+      return;
+    }
     varDataSetLen(output, len);
   }
   break;
......
@@ -23,6 +23,7 @@
 #include "ttype.h"
 #include "tutil.h"
 #include "tvariant.h"
+#include "tulog.h"
 #define SET_EXT_INFO(converted, res, minv, maxv, exti) do { \
   if (converted == NULL || exti == NULL || *converted == false) { break; } \
@@ -359,8 +360,12 @@ int32_t tVariantToString(tVariant *pVar, char *dst) {
   case TSDB_DATA_TYPE_NCHAR: {
     dst[0] = '\'';
-    taosUcs4ToMbs(pVar->wpz, (twcslen(pVar->wpz) + 1) * TSDB_NCHAR_SIZE, dst + 1);
-    int32_t len = (int32_t)strlen(dst);
+    int32_t len = taosUcs4ToMbs(pVar->wpz, (twcslen(pVar->wpz) + 1) * TSDB_NCHAR_SIZE, dst + 1);
+    if (len < 0){
+      uError("castConvert1 taosUcs4ToMbs error");
+      return 0 ;
+    }
+    len = (int32_t)strlen(dst);
     dst[len] = '\'';
     dst[len + 1] = 0;
     return len + 1;
@@ -428,11 +433,17 @@ static int32_t toBinary(tVariant *pVariant, char **pDest, int32_t *pDestSize) {
     pBuf = realloc(pBuf, newSize + 1);
   }
-  taosUcs4ToMbs(pVariant->wpz, (int32_t)newSize, pBuf);
+  int32_t len = taosUcs4ToMbs(pVariant->wpz, (int32_t)newSize, pBuf);
+  if (len < 0){
+    uError("castConvert1 taosUcs4ToMbs error");
+  }
   free(pVariant->wpz);
   pBuf[newSize] = 0;
 } else {
-  taosUcs4ToMbs(pVariant->wpz, (int32_t)newSize, *pDest);
+  int32_t len = taosUcs4ToMbs(pVariant->wpz, (int32_t)newSize, *pDest);
+  if (len < 0){
+    uError("castConvert1 taosUcs4ToMbs error");
+  }
 }
} else {
......
@@ -398,5 +398,8 @@ namespace TDengineDriver
     IntPtr stmtErrPrt = StmtErrPtr(stmt);
     return Marshal.PtrToStringAnsi(stmtErrPrt);
   }
+
+  [DllImport("taos", EntryPoint = "taos_fetch_lengths", CallingConvention = CallingConvention.Cdecl)]
+  static extern public IntPtr FetchLengths(IntPtr taos);
 }
}
@@ -9,4 +9,8 @@
   <TargetFramework>net5.0</TargetFramework>
 </PropertyGroup>
+<PropertyGroup>
+  <GenerateDocumentationFile>true</GenerateDocumentationFile>
+  <DocumentationFile>..\doc\FunctionTest.XML</DocumentationFile>
+</PropertyGroup>
 </Project>
using System;
using Test.UtilsTools;
using System.Collections.Generic;
namespace Cases
{
public class FetchLengthCase
{
/// <author>xiaolei</author>
/// <Name>TestRetrieveBinary</Name>
/// <describe>TD-12103 C# connector fetch_row with binary data retrieving error</describe>
/// <filename>FetchLength.cs</filename>
/// <result>pass or failed </result>
public void TestRetrieveBinary(IntPtr conn)
{
string sql1 = "create stable stb1 (ts timestamp, name binary(10)) tags(n int);";
string sql2 = "insert into tb1 using stb1 tags(1) values(now, 'log');";
string sql3 = "insert into tb2 using stb1 tags(2) values(now, 'test');";
string sql4 = "insert into tb3 using stb1 tags(3) values(now, 'db02');";
string sql5 = "insert into tb4 using stb1 tags(4) values(now, 'db3');";
string sql6 = "select distinct(name) from stb1;";//
UtilsTools.ExecuteQuery(conn, sql1);
UtilsTools.ExecuteQuery(conn, sql2);
UtilsTools.ExecuteQuery(conn, sql3);
UtilsTools.ExecuteQuery(conn, sql4);
UtilsTools.ExecuteQuery(conn, sql5);
IntPtr resPtr = IntPtr.Zero;
resPtr = UtilsTools.ExecuteQuery(conn, sql6);
List<List<string>> result = UtilsTools.GetResultSet(resPtr);
List<string> colname = result[0];
List<string> data = result[1];
UtilsTools.AssertEqual("db3", data[0]);
UtilsTools.AssertEqual("log", data[1]);
UtilsTools.AssertEqual("db02", data[2]);
UtilsTools.AssertEqual("test", data[3]);
}
}
}
 using System;
 using Test.UtilsTools;
 using Cases;

 namespace Cases.EntryPoint
 {
     class Program
     {
         static void Main(string[] args)
         {
             IntPtr conn = IntPtr.Zero;
             IntPtr stmt = IntPtr.Zero;
             IntPtr res = IntPtr.Zero;
             conn = UtilsTools.TDConnection("127.0.0.1", "root", "taosdata", "", 0);
             UtilsTools.ExecuteQuery(conn, "drop database if exists csharp");
             UtilsTools.ExecuteQuery(conn, "create database if not exists csharp keep 3650");
             UtilsTools.ExecuteQuery(conn, "use csharp");
             Console.WriteLine("====================StableColumnByColumn===================");
             StableColumnByColumn columnByColumn = new StableColumnByColumn();
             columnByColumn.Test(conn, "stablecolumnbycolumn");
             Console.WriteLine("====================StmtStableQuery===================");
             StmtStableQuery stmtStableQuery = new StmtStableQuery();
             stmtStableQuery.Test(conn, "stablecolumnbycolumn");
             Console.WriteLine("====================StableMutipleLine===================");
             StableMutipleLine mutipleLine = new StableMutipleLine();
             mutipleLine.Test(conn, "stablemutipleline");
             //================================================================================
             Console.WriteLine("====================NtableSingleLine===================");
             NtableSingleLine ntableSingleLine = new NtableSingleLine();
             ntableSingleLine.Test(conn, "stablesingleline");
             Console.WriteLine("====================NtableMutipleLine===================");
             NtableMutipleLine ntableMutipleLine = new NtableMutipleLine();
             ntableMutipleLine.Test(conn, "ntablemutipleline");
             Console.WriteLine("====================StmtNtableQuery===================");
             StmtNtableQuery stmtNtableQuery = new StmtNtableQuery();
             stmtNtableQuery.Test(conn, "ntablemutipleline");
             Console.WriteLine("====================NtableColumnByColumn===================");
             NtableColumnByColumn ntableColumnByColumn = new NtableColumnByColumn();
             ntableColumnByColumn.Test(conn, "ntablecolumnbycolumn");
             Console.WriteLine("====================fetchfeilds===================");
             FetchFields fetchFields = new FetchFields();
-            fetchFields.Test(conn,"fetchfeilds");
+            fetchFields.Test(conn, "fetchfeilds");
             Console.WriteLine("===================JsonTagTest====================");
             JsonTagTest jsonTagTest = new JsonTagTest();
             jsonTagTest.Test(conn);
-            // UtilsTools.ExecuteQuery(conn, "drop database if exists csharp");
+            Console.WriteLine("====================fetchLengthCase===================");
+            FetchLengthCase fetchLengthCase = new FetchLengthCase();
+            fetchLengthCase.TestRetrieveBinary(conn);
+            UtilsTools.ExecuteQuery(conn, "drop database if exists csharp");
             UtilsTools.CloseConnection(conn);
             UtilsTools.ExitProgram();
         }
     }
 }
...@@ -35,7 +35,6 @@ namespace Test.UtilsTools ...@@ -35,7 +35,6 @@ namespace Test.UtilsTools
else else
{ {
Console.WriteLine(sql.ToString() + " success"); Console.WriteLine(sql.ToString() + " success");
} }
return res; return res;
} }
...@@ -83,9 +82,13 @@ namespace Test.UtilsTools ...@@ -83,9 +82,13 @@ namespace Test.UtilsTools
IntPtr rowdata; IntPtr rowdata;
StringBuilder builder = new StringBuilder(); StringBuilder builder = new StringBuilder();
while ((rowdata = TDengine.FetchRows(res)) != IntPtr.Zero) while ((rowdata = TDengine.FetchRows(res)) != IntPtr.Zero)
{ {
queryRows++; queryRows++;
IntPtr colLengthPtr = TDengine.FetchLengths(res);
int[] colLengthArr = new int[fieldCount];
Marshal.Copy(colLengthPtr, colLengthArr, 0, fieldCount);
for (int fields = 0; fields < fieldCount; ++fields) for (int fields = 0; fields < fieldCount; ++fields)
{ {
TDengineMeta meta = metas[fields]; TDengineMeta meta = metas[fields];
...@@ -131,7 +134,7 @@ namespace Test.UtilsTools ...@@ -131,7 +134,7 @@ namespace Test.UtilsTools
builder.Append(v7); builder.Append(v7);
break; break;
case TDengineDataType.TSDB_DATA_TYPE_BINARY: case TDengineDataType.TSDB_DATA_TYPE_BINARY:
string v8 = Marshal.PtrToStringAnsi(data); string v8 = Marshal.PtrToStringAnsi(data, colLengthArr[fields]);
builder.Append(v8); builder.Append(v8);
break; break;
case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP:
...@@ -139,7 +142,7 @@ namespace Test.UtilsTools ...@@ -139,7 +142,7 @@ namespace Test.UtilsTools
builder.Append(v9); builder.Append(v9);
break; break;
case TDengineDataType.TSDB_DATA_TYPE_NCHAR: case TDengineDataType.TSDB_DATA_TYPE_NCHAR:
string v10 = Marshal.PtrToStringAnsi(data); string v10 = Marshal.PtrToStringAnsi(data, colLengthArr[fields]);
builder.Append(v10); builder.Append(v10);
break; break;
case TDengineDataType.TSDB_DATA_TYPE_JSONTAG: case TDengineDataType.TSDB_DATA_TYPE_JSONTAG:
...@@ -164,6 +167,117 @@ namespace Test.UtilsTools ...@@ -164,6 +167,117 @@ namespace Test.UtilsTools
TDengine.FreeResult(res); Console.WriteLine(""); TDengine.FreeResult(res); Console.WriteLine("");
} }
public static List<List<string>> GetResultSet(IntPtr res)
{
List<List<string>> result = new List<List<string>>();
List<string> colName = new List<string>();
List<string> dataRaw = new List<string>();
long queryRows = 0;
if (!IsValidResult(res))
{
ExitProgram();
}
int fieldCount = TDengine.FieldCount(res);
List<TDengineMeta> metas = TDengine.FetchFields(res);
for (int j = 0; j < metas.Count; j++)
{
TDengineMeta meta = (TDengineMeta)metas[j];
colName.Add(meta.name);
}
result.Add(colName);
IntPtr rowdata;
while ((rowdata = TDengine.FetchRows(res)) != IntPtr.Zero)
{
queryRows++;
IntPtr colLengthPtr = TDengine.FetchLengths(res);
int[] colLengthArr = new int[fieldCount];
Marshal.Copy(colLengthPtr, colLengthArr, 0, fieldCount);
for (int fields = 0; fields < fieldCount; ++fields)
{
TDengineMeta meta = metas[fields];
int offset = IntPtr.Size * fields;
IntPtr data = Marshal.ReadIntPtr(rowdata, offset);
if (data == IntPtr.Zero)
{
dataRaw.Add("NULL");
continue;
}
switch ((TDengineDataType)meta.type)
{
case TDengineDataType.TSDB_DATA_TYPE_BOOL:
bool v1 = Marshal.ReadByte(data) == 0 ? false : true;
dataRaw.Add(v1.ToString());
break;
case TDengineDataType.TSDB_DATA_TYPE_TINYINT:
byte v2 = Marshal.ReadByte(data);
dataRaw.Add(v2.ToString());
break;
case TDengineDataType.TSDB_DATA_TYPE_SMALLINT:
short v3 = Marshal.ReadInt16(data);
dataRaw.Add(v3.ToString());
break;
case TDengineDataType.TSDB_DATA_TYPE_INT:
int v4 = Marshal.ReadInt32(data);
dataRaw.Add(v4.ToString());
break;
case TDengineDataType.TSDB_DATA_TYPE_BIGINT:
long v5 = Marshal.ReadInt64(data);
dataRaw.Add(v5.ToString());
break;
case TDengineDataType.TSDB_DATA_TYPE_FLOAT:
float v6 = (float)Marshal.PtrToStructure(data, typeof(float));
dataRaw.Add(v6.ToString());
break;
case TDengineDataType.TSDB_DATA_TYPE_DOUBLE:
double v7 = (double)Marshal.PtrToStructure(data, typeof(double));
dataRaw.Add(v7.ToString());
break;
case TDengineDataType.TSDB_DATA_TYPE_BINARY:
string v8 = Marshal.PtrToStringAnsi(data, colLengthArr[fields]);
dataRaw.Add(v8);
break;
case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP:
long v9 = Marshal.ReadInt64(data);
dataRaw.Add(v9.ToString());
break;
case TDengineDataType.TSDB_DATA_TYPE_NCHAR:
string v10 = Marshal.PtrToStringAnsi(data, colLengthArr[fields]);
dataRaw.Add(v10);
break;
}
}
}
result.Add(dataRaw);
if (TDengine.ErrorNo(res) != 0)
{
Console.Write("Query is not complete, Error {0:G}", TDengine.ErrorNo(res), TDengine.Error(res));
}
TDengine.FreeResult(res); Console.WriteLine("");
return result;
}
public static bool IsValidResult(IntPtr res)
{
if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0))
{
if (res != IntPtr.Zero)
{
Console.Write("reason: " + TDengine.Error(res));
return false;
}
Console.WriteLine("");
return false;
}
return true;
}
public static void CloseConnection(IntPtr conn) public static void CloseConnection(IntPtr conn)
{ {
if (conn != IntPtr.Zero) if (conn != IntPtr.Zero)
...@@ -183,6 +297,18 @@ namespace Test.UtilsTools ...@@ -183,6 +297,18 @@ namespace Test.UtilsTools
List<TDengineMeta> metas = TDengine.FetchFields(res); List<TDengineMeta> metas = TDengine.FetchFields(res);
return metas; return metas;
} }
public static void AssertEqual(string expectVal, string actualVal)
{
if (expectVal == actualVal)
{
Console.WriteLine("{0}=={1} pass", expectVal, actualVal);
}
else
{
Console.WriteLine("{0}=={1} failed", expectVal, actualVal);
ExitProgram();
}
}
public static void ExitProgram() public static void ExitProgram()
{ {
TDengine.Cleanup(); TDengine.Cleanup();
......
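The FetchLengths hunks earlier in this file fetch per-column byte lengths so that BINARY/NCHAR values are read with Marshal.PtrToStringAnsi(data, len) instead of relying on NUL termination. A minimal C sketch of the native pattern the binding wraps, assuming the standard taos.h client API:

#include <stdio.h>
#include <taos.h>

/* Print var-length columns of each row using the per-row length array,
 * mirroring the TDengine.FetchLengths/Marshal.Copy pattern above. */
void print_var_columns(TAOS_RES *res) {
    int         nfields = taos_num_fields(res);
    TAOS_FIELD *fields  = taos_fetch_fields(res);
    TAOS_ROW    row;
    while ((row = taos_fetch_row(res)) != NULL) {
        int *lengths = taos_fetch_lengths(res);  /* valid for the current row only */
        for (int i = 0; i < nfields; ++i) {
            if (row[i] == NULL) continue;        /* NULL cell */
            if (fields[i].type == TSDB_DATA_TYPE_BINARY ||
                fields[i].type == TSDB_DATA_TYPE_NCHAR) {
                /* print exactly lengths[i] bytes: the buffer is not
                 * guaranteed to be NUL-terminated */
                printf("%.*s\t", lengths[i], (const char *)row[i]);
            }                                    /* fixed-width types omitted */
        }
        printf("\n");
    }
}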
...@@ -2,10 +2,14 @@ ...@@ -2,10 +2,14 @@
<PropertyGroup> <PropertyGroup>
<TargetFramework>net5.0</TargetFramework> <TargetFramework>net5.0</TargetFramework>
<IsPackable>false</IsPackable> <IsPackable>false</IsPackable>
</PropertyGroup> </PropertyGroup>
<PropertyGroup>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
<DocumentationFile>..\doc\UnitTest.XML</DocumentationFile>
</PropertyGroup>
<ItemGroup> <ItemGroup>
<PackageReference Include="coverlet.msbuild" Version="3.1.0"> <PackageReference Include="coverlet.msbuild" Version="3.1.0">
<IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets> <IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
......
...@@ -443,16 +443,29 @@ public class TSDBResultSetRowData { ...@@ -443,16 +443,29 @@ public class TSDBResultSetRowData {
case 0: { case 0: {
milliseconds = ts; milliseconds = ts;
fracNanoseconds = (int) (ts * 1_000_000 % 1_000_000_000); fracNanoseconds = (int) (ts * 1_000_000 % 1_000_000_000);
fracNanoseconds = fracNanoseconds < 0 ? 1_000_000_000 + fracNanoseconds : fracNanoseconds;
break; break;
} }
case 1: { case 1: {
milliseconds = ts / 1_000; milliseconds = ts / 1_000;
fracNanoseconds = (int) (ts * 1_000 % 1_000_000_000); fracNanoseconds = (int) (ts * 1_000 % 1_000_000_000);
if (fracNanoseconds < 0) {
if (milliseconds == 0 ){
milliseconds = -1;
}
fracNanoseconds += 1_000_000_000;
}
break; break;
} }
case 2: { case 2: {
milliseconds = ts / 1_000_000; milliseconds = ts / 1_000_000;
fracNanoseconds = (int) (ts % 1_000_000_000); fracNanoseconds = (int) (ts % 1_000_000_000);
if (fracNanoseconds < 0) {
if (milliseconds == 0 ){
milliseconds = -1;
}
fracNanoseconds += 1_000_000_000;
}
break; break;
} }
default: { default: {
......
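The branches added above exist because integer division truncates toward zero, so any timestamp just before the epoch produces a negative fractional part; the fix borrows one unit from the millisecond field. A worked C example for the microsecond case (case 1), with values matching the usTest assertions in the test file below:

#include <stdio.h>

int main(void) {
    long long ts = -1;                  /* raw value: 1969-12-31 23:59:59.999999 UTC, in us */
    long long milliseconds = ts / 1000;                   /* truncates to 0 */
    int fracNanoseconds = (int)(ts * 1000 % 1000000000);  /* -1000 */
    if (fracNanoseconds < 0) {          /* borrow one millisecond */
        if (milliseconds == 0) milliseconds = -1;
        fracNanoseconds += 1000000000;  /* 999999000 */
    }
    /* prints "-1 ms, 999999000 ns", matching getTime()/getNanos() in usTest */
    printf("%lld ms, %d ns\n", milliseconds, fracNanoseconds);
    return 0;
}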
package com.taosdata.jdbc.cases; package com.taosdata.jdbc.cases;
import com.taosdata.jdbc.annotation.CatalogRunner;
import com.taosdata.jdbc.annotation.Description;
import com.taosdata.jdbc.annotation.TestTarget;
import com.taosdata.jdbc.utils.TimestampUtil; import com.taosdata.jdbc.utils.TimestampUtil;
import org.junit.*; import org.junit.*;
import org.junit.runner.RunWith;
import java.sql.*; import java.sql.*;
@RunWith(CatalogRunner.class)
@TestTarget(alias = "negative value convert to timestamp", author = "huolibo", version = "2.0.37")
public class DatetimeBefore1970Test { public class DatetimeBefore1970Test {
private static final String host = "127.0.0.1"; private static final String host = "127.0.0.1";
private Connection conn; private Connection conn;
@Test @Test
public void test() throws SQLException { @Description("millisecond")
public void msTest() throws SQLException {
conn = createEnvironment("ms");
long now = System.currentTimeMillis();
try (Statement stmt = conn.createStatement()) { try (Statement stmt = conn.createStatement()) {
// given // given
// before
stmt.executeUpdate("insert into weather(ts) values('1969-12-31 00:00:00.001')");
stmt.executeUpdate("insert into weather(ts) values('1969-12-31 23:59:59.999')"); stmt.executeUpdate("insert into weather(ts) values('1969-12-31 23:59:59.999')");
// zero
stmt.executeUpdate("insert into weather(ts) values('1970-01-01 00:00:00.000')"); stmt.executeUpdate("insert into weather(ts) values('1970-01-01 00:00:00.000')");
stmt.executeUpdate("insert into weather(ts) values('1970-01-01 08:00:00.000')"); //after
stmt.executeUpdate("insert into weather(ts) values('1970-01-01 07:59:59.999')"); stmt.executeUpdate("insert into weather(ts) values('1970-01-01 00:00:00.001')");
stmt.executeUpdate("insert into weather(ts) values(" + now + ")");
ResultSet rs = stmt.executeQuery("select * from weather order by ts asc"); ResultSet rs = stmt.executeQuery("select * from weather order by ts asc");
ResultSetMetaData metaData = rs.getMetaData(); ResultSetMetaData metaData = rs.getMetaData();
Assert.assertEquals(2, metaData.getColumnCount()); Assert.assertEquals(2, metaData.getColumnCount());
...@@ -26,44 +39,221 @@ public class DatetimeBefore1970Test { ...@@ -26,44 +39,221 @@ public class DatetimeBefore1970Test {
rs.next(); rs.next();
// then // then
Timestamp ts = rs.getTimestamp("ts"); Timestamp ts = rs.getTimestamp("ts");
Assert.assertEquals("1969-12-31 23:59:59.999", TimestampUtil.longToDatetime(ts.getTime())); Assert.assertEquals(-24 * 60 * 60 * 1000 + 1, ts.getTime());
// when // when
rs.next(); rs.next();
// then // then
ts = rs.getTimestamp("ts"); ts = rs.getTimestamp("ts");
Assert.assertEquals("1970-01-01 00:00:00.000", TimestampUtil.longToDatetime(ts.getTime())); Assert.assertEquals(-1, ts.getTime());
// when // when
rs.next(); rs.next();
// then // then
ts = rs.getTimestamp("ts"); ts = rs.getTimestamp("ts");
Assert.assertEquals("1970-01-01 08:00:00.000", TimestampUtil.longToDatetime(ts.getTime())); Assert.assertEquals(0, ts.getTime());
// when
rs.next();
// then
ts = rs.getTimestamp("ts");
Assert.assertEquals(1, ts.getTime());
// when
rs.next();
// then
ts = rs.getTimestamp("ts");
Assert.assertEquals(now, ts.getTime());
}
}
@Test
@Description("microsecond")
public void usTest() throws SQLException {
conn = createEnvironment("us");
long now = System.currentTimeMillis();
try (Statement stmt = conn.createStatement()) {
// given
stmt.executeUpdate("insert into weather(ts) values('1969-12-31 00:00:00.000001')");
stmt.executeUpdate("insert into weather(ts) values('1969-12-31 23:59:59.999999')");
stmt.executeUpdate("insert into weather(ts) values('1970-01-01 00:00:00.000000')");
stmt.executeUpdate("insert into weather(ts) values('1970-01-01 00:00:00.000001')");
stmt.executeUpdate("insert into weather(ts) values(" + now + ")");
ResultSet rs = stmt.executeQuery("select * from weather order by ts asc");
ResultSetMetaData metaData = rs.getMetaData();
Assert.assertEquals(2, metaData.getColumnCount());
// when
rs.next();
// then
Timestamp ts = rs.getTimestamp("ts");
Assert.assertEquals(-24 * 60 * 60 * 1000, ts.getTime());
Assert.assertEquals(1_000, ts.getNanos());
// when
rs.next();
// then
ts = rs.getTimestamp("ts");
Assert.assertEquals(-1, ts.getTime());
Assert.assertEquals(999_999_000, ts.getNanos());
// when
rs.next();
// then
ts = rs.getTimestamp("ts");
Assert.assertEquals(0, ts.getTime());
// when
rs.next();
// then
ts = rs.getTimestamp("ts");
Assert.assertEquals(0, ts.getTime());
Assert.assertEquals(1_000, ts.getNanos());
// when
rs.next();
// then
ts = rs.getTimestamp("ts");
String s = String.valueOf(now);
Assert.assertEquals(Long.parseLong(s.substring(0, s.length() - 3)), ts.getTime());
Assert.assertEquals(Long.parseLong(s.substring(s.length() - 6) + "000"), ts.getNanos());
}
}
@Test
@Description("nanosecond")
public void nanoTest() throws SQLException {
conn = createEnvironment("ns");
long now = System.currentTimeMillis() * 1000_000L + System.nanoTime() % 1000_000L;
try (Statement stmt = conn.createStatement()) {
// given
stmt.executeUpdate("insert into weather(ts) values('1969-12-31 00:00:00.000000123')");
stmt.executeUpdate("insert into weather(ts) values('1969-12-31 23:59:59.999999999')");
stmt.executeUpdate("insert into weather(ts) values('1970-01-01 00:00:00.000')");
stmt.executeUpdate("insert into weather(ts) values('1970-01-01 00:00:00.000000001')");
stmt.executeUpdate("insert into weather(ts) values(" + now + ")");
ResultSet rs = stmt.executeQuery("select * from weather order by ts asc");
ResultSetMetaData metaData = rs.getMetaData();
Assert.assertEquals(2, metaData.getColumnCount());
// when
rs.next();
// then
Timestamp ts = rs.getTimestamp("ts");
Assert.assertEquals(-24 * 60 * 60 * 1_000, ts.getTime());
Assert.assertEquals(123, ts.getNanos());
// when
rs.next();
// then
ts = rs.getTimestamp("ts");
Assert.assertEquals(-1, ts.getTime());
Assert.assertEquals(999999999, ts.getNanos());
// when
rs.next();
// then
ts = rs.getTimestamp("ts");
Assert.assertEquals(0, ts.getNanos());
// when
rs.next();
// then
ts = rs.getTimestamp("ts");
Assert.assertEquals(1, ts.getNanos());
// when
rs.next();
// then
ts = rs.getTimestamp("ts");
String s = String.valueOf(now);
Assert.assertEquals(Long.parseLong(s.substring(0, s.length() - 6)), ts.getTime());
Assert.assertEquals(Long.parseLong(s.substring(s.length() - 9)), ts.getNanos());
}
}
@Test
@Ignore
@Description("nanosecond convert timestamp when timezone is asia shanghai")
public void asiaShanghaiTest() throws SQLException {
conn = createEnvironment("ns");
long now = System.currentTimeMillis() * 1000_000L + System.nanoTime() % 1000_000L;
try (Statement stmt = conn.createStatement()) {
// given
stmt.executeUpdate("insert into weather(ts) values('1969-12-31 00:00:00.000000123')");
stmt.executeUpdate("insert into weather(ts) values('1969-12-31 23:59:59.999999999')");
stmt.executeUpdate("insert into weather(ts) values('1970-01-01 00:00:00.000')");
stmt.executeUpdate("insert into weather(ts) values('1970-01-01 00:00:00.000000001')");
stmt.executeUpdate("insert into weather(ts) values(" + now + ")");
ResultSet rs = stmt.executeQuery("select * from weather order by ts asc");
ResultSetMetaData metaData = rs.getMetaData();
Assert.assertEquals(2, metaData.getColumnCount());
// when
rs.next();
// then
Timestamp ts = rs.getTimestamp("ts");
Assert.assertEquals("1969-12-31 08:00:00.000", TimestampUtil.longToDatetime(ts.getTime()));
Assert.assertEquals(123, ts.getNanos());
// when // when
rs.next(); rs.next();
// then // then
ts = rs.getTimestamp("ts"); ts = rs.getTimestamp("ts");
Assert.assertEquals("1970-01-01 07:59:59.999", TimestampUtil.longToDatetime(ts.getTime())); Assert.assertEquals("1970-01-01 07:59:59.999", TimestampUtil.longToDatetime(ts.getTime()));
Assert.assertEquals(999999999, ts.getNanos());
// when
rs.next();
// then
ts = rs.getTimestamp("ts");
Assert.assertEquals("1970-01-01 08:00:00.000", TimestampUtil.longToDatetime(ts.getTime()));
Assert.assertEquals(0, ts.getNanos());
// when
rs.next();
// then
ts = rs.getTimestamp("ts");
Assert.assertEquals("1970-01-01 08:00:00.000", TimestampUtil.longToDatetime(ts.getTime()));
Assert.assertEquals(1, ts.getNanos());
// when
rs.next();
// then
ts = rs.getTimestamp("ts");
String s = String.valueOf(now);
Assert.assertEquals(Long.parseLong(s.substring(0, s.length() - 6)), ts.getTime());
Assert.assertEquals(Long.parseLong(s.substring(s.length() - 9)), ts.getNanos());
} }
} }
@Before private Connection createEnvironment(String precision) throws SQLException {
public void before() throws SQLException { String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata&timezone=UTC";
conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"); String createSql = "create database if not exists test_timestamp keep 36500";
if (!isEmpty(precision)) {
createSql += " precision '" + precision + "'";
}
conn = DriverManager.getConnection(url);
Statement stmt = conn.createStatement(); Statement stmt = conn.createStatement();
stmt.execute("drop database if exists test_timestamp"); stmt.execute("drop database if exists test_timestamp");
stmt.execute("create database if not exists test_timestamp keep 36500"); stmt.execute(createSql);
stmt.execute("use test_timestamp"); stmt.execute("use test_timestamp");
stmt.execute("create table weather(ts timestamp,f1 float)"); stmt.execute("create table weather(ts timestamp,f1 float)");
stmt.close(); stmt.close();
return conn;
}
private boolean isEmpty(String string) {
return null == string || string.trim().equals("");
} }
@After @After
public void after() throws SQLException { public void after() throws SQLException {
Statement stmt = conn.createStatement(); if (conn != null) {
stmt.execute("drop database if exists test_timestamp"); Statement stmt = conn.createStatement();
if (conn != null) stmt.execute("drop database if exists test_timestamp");
stmt.close();
conn.close(); conn.close();
}
} }
} }
...@@ -274,7 +274,6 @@ int32_t* taosGetErrno(); ...@@ -274,7 +274,6 @@ int32_t* taosGetErrno();
#define TSDB_CODE_TDB_IVLD_TAG_VAL TAOS_DEF_ERROR_CODE(0, 0x0615) //"TSDB invalid tag value") #define TSDB_CODE_TDB_IVLD_TAG_VAL TAOS_DEF_ERROR_CODE(0, 0x0615) //"TSDB invalid tag value")
#define TSDB_CODE_TDB_NO_CACHE_LAST_ROW TAOS_DEF_ERROR_CODE(0, 0x0616) //"TSDB no cache last row data") #define TSDB_CODE_TDB_NO_CACHE_LAST_ROW TAOS_DEF_ERROR_CODE(0, 0x0616) //"TSDB no cache last row data")
#define TSDB_CODE_TDB_INCOMPLETE_DFILESET TAOS_DEF_ERROR_CODE(0, 0x0617) //"TSDB incomplete DFileSet") #define TSDB_CODE_TDB_INCOMPLETE_DFILESET TAOS_DEF_ERROR_CODE(0, 0x0617) //"TSDB incomplete DFileSet")
#define TSDB_CODE_TDB_NO_JSON_TAG_KEY TAOS_DEF_ERROR_CODE(0, 0x0618) //"TSDB no tag json key")
// query // query
#define TSDB_CODE_QRY_INVALID_QHANDLE TAOS_DEF_ERROR_CODE(0, 0x0700) //"Invalid handle") #define TSDB_CODE_QRY_INVALID_QHANDLE TAOS_DEF_ERROR_CODE(0, 0x0700) //"Invalid handle")
......
Subproject commit a3611888d4257a9baa0ce876b04b47c60cc17279 Subproject commit 14a23779d24a9571cdb7165bea2b0208d54c53ad
...@@ -601,6 +601,10 @@ static void monSaveSystemInfo() { ...@@ -601,6 +601,10 @@ static void monSaveSystemInfo() {
} }
static int32_t monGetRowElemCharLen(TAOS_FIELD field, char *rowElem) { static int32_t monGetRowElemCharLen(TAOS_FIELD field, char *rowElem) {
if (field.type != TSDB_DATA_TYPE_BINARY && field.type != TSDB_DATA_TYPE_NCHAR) {
return -1;
}
int32_t charLen = varDataLen(rowElem - VARSTR_HEADER_SIZE); int32_t charLen = varDataLen(rowElem - VARSTR_HEADER_SIZE);
if (field.type == TSDB_DATA_TYPE_BINARY) { if (field.type == TSDB_DATA_TYPE_BINARY) {
assert(charLen <= field.bytes && charLen >= 0); assert(charLen <= field.bytes && charLen >= 0);
...@@ -629,12 +633,14 @@ static int32_t monBuildMasterUptimeSql(char *sql) { ...@@ -629,12 +633,14 @@ static int32_t monBuildMasterUptimeSql(char *sql) {
while ((row = taos_fetch_row(result))) { while ((row = taos_fetch_row(result))) {
for (int i = 0; i < num_fields; ++i) { for (int i = 0; i < num_fields; ++i) {
int32_t charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); if (strcmp(fields[i].name, "role") == 0) {
if (strcmp(fields[i].name, "role") == 0 && strncmp((char *)row[i], "master", charLen) == 0) { int32_t charLen = monGetRowElemCharLen(fields[i], (char *)row[i]);
if (strcmp(fields[i + 1].name, "role_time") == 0) { if (strncmp((char *)row[i], "master", charLen) == 0) {
int64_t now = taosGetTimestamp(TSDB_TIME_PRECISION_MILLI); if (strcmp(fields[i + 1].name, "role_time") == 0) {
//master uptime in seconds int64_t now = taosGetTimestamp(TSDB_TIME_PRECISION_MILLI);
masterUptime = (now - *(int64_t *)row[i + 1]) / 1000; //master uptime in seconds
masterUptime = (now - *(int64_t *)row[i + 1]) / 1000;
}
} }
} }
} }
...@@ -1139,12 +1145,16 @@ static uint32_t monBuildVgroupsInfoSql(char *sql, char *dbName) { ...@@ -1139,12 +1145,16 @@ static uint32_t monBuildVgroupsInfoSql(char *sql, char *dbName) {
pos += snprintf(sql, SQL_LENGTH, "insert into %s.vgroup_%d values(%" PRId64 ", "SQL_STR_FMT, pos += snprintf(sql, SQL_LENGTH, "insert into %s.vgroup_%d values(%" PRId64 ", "SQL_STR_FMT,
tsMonitorDbName, vgId, ts, dbName); tsMonitorDbName, vgId, ts, dbName);
} else { } else {
return TSDB_CODE_SUCCESS; goto DONE;
} }
} else if (strcmp(fields[i].name, "tables") == 0) { } else if (strcmp(fields[i].name, "tables") == 0) {
pos += snprintf(sql + pos, SQL_LENGTH, ", %d", *(int32_t *)row[i]); pos += snprintf(sql + pos, SQL_LENGTH, ", %d", *(int32_t *)row[i]);
} else if (strcmp(fields[i].name, "status") == 0) { } else if (strcmp(fields[i].name, "status") == 0) {
charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); charLen = monGetRowElemCharLen(fields[i], (char *)row[i]);
if (charLen < 0) {
monError("failed to save vgroup_%d info, reason: invalid row %s len, sql:%s", vgId, (char *)row[i], tsMonitor.sql);
goto DONE;
}
pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 1, ", "SQL_STR_FMT, (char *)row[i]); pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 1, ", "SQL_STR_FMT, (char *)row[i]);
} else if (strcmp(fields[i].name, "onlines") == 0) { } else if (strcmp(fields[i].name, "onlines") == 0) {
pos += snprintf(sql + pos, SQL_LENGTH, ", %d", *(int32_t *)row[i]); pos += snprintf(sql + pos, SQL_LENGTH, ", %d", *(int32_t *)row[i]);
...@@ -1152,6 +1162,10 @@ static uint32_t monBuildVgroupsInfoSql(char *sql, char *dbName) { ...@@ -1152,6 +1162,10 @@ static uint32_t monBuildVgroupsInfoSql(char *sql, char *dbName) {
snprintf(v_dnode_ids, sizeof(v_dnode_ids), "%d;", *(int16_t *)row[i]); snprintf(v_dnode_ids, sizeof(v_dnode_ids), "%d;", *(int16_t *)row[i]);
} else if (v_dnode_str && strcmp(v_dnode_str, "_status") == 0) { } else if (v_dnode_str && strcmp(v_dnode_str, "_status") == 0) {
charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); charLen = monGetRowElemCharLen(fields[i], (char *)row[i]);
if (charLen < 0) {
monError("failed to save vgroup_%d info, reason: invalid row %s len, sql:%s", vgId, (char *)row[i], tsMonitor.sql);
goto DONE;
}
snprintf(v_dnode_status, charLen + 1, "%s;", (char *)row[i]); snprintf(v_dnode_status, charLen + 1, "%s;", (char *)row[i]);
} else if (strcmp(fields[i].name, "compacting") == 0) { } else if (strcmp(fields[i].name, "compacting") == 0) {
//flush dnode_ids and dnode_role in to sql //flush dnode_ids and dnode_role in to sql
...@@ -1169,8 +1183,9 @@ static uint32_t monBuildVgroupsInfoSql(char *sql, char *dbName) { ...@@ -1169,8 +1183,9 @@ static uint32_t monBuildVgroupsInfoSql(char *sql, char *dbName) {
monDebug("successfully to save vgroup_%d info, sql:%s", vgId, tsMonitor.sql); monDebug("successfully to save vgroup_%d info, sql:%s", vgId, tsMonitor.sql);
} }
} }
taos_free_result(result);
DONE:
taos_free_result(result);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
...@@ -1220,12 +1235,24 @@ static void monSaveSlowQueryInfo() { ...@@ -1220,12 +1235,24 @@ static void monSaveSlowQueryInfo() {
if (strcmp(fields[i].name, "query_id") == 0) { if (strcmp(fields[i].name, "query_id") == 0) {
has_slowquery = true; has_slowquery = true;
charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); charLen = monGetRowElemCharLen(fields[i], (char *)row[i]);
if (charLen < 0) {
monError("failed to save slow_query info, reason: invalid row %s len, sql:%s", (char *)row[i], tsMonitor.sql);
goto DONE;
}
pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 1, ", "SQL_STR_FMT, (char *)row[i]); pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 1, ", "SQL_STR_FMT, (char *)row[i]);
} else if (strcmp(fields[i].name, "user") == 0) { } else if (strcmp(fields[i].name, "user") == 0) {
charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); charLen = monGetRowElemCharLen(fields[i], (char *)row[i]);
if (charLen < 0) {
monError("failed to save slow_query info, reason: invalid row %s len, sql:%s", (char *)row[i], tsMonitor.sql);
goto DONE;
}
pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 1, ", "SQL_STR_FMT, (char *)row[i]); pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 1, ", "SQL_STR_FMT, (char *)row[i]);
} else if (strcmp(fields[i].name, "qid") == 0) { } else if (strcmp(fields[i].name, "qid") == 0) {
charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); charLen = monGetRowElemCharLen(fields[i], (char *)row[i]);
if (charLen < 0) {
monError("failed to save slow_query info, reason: invalid row %s len, sql:%s", (char *)row[i], tsMonitor.sql);
goto DONE;
}
pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 1, ", "SQL_STR_FMT, (char *)row[i]); pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 1, ", "SQL_STR_FMT, (char *)row[i]);
} else if (strcmp(fields[i].name, "created_time") == 0) { } else if (strcmp(fields[i].name, "created_time") == 0) {
int64_t create_time = *(int64_t *)row[i]; int64_t create_time = *(int64_t *)row[i];
...@@ -1235,18 +1262,25 @@ static void monSaveSlowQueryInfo() { ...@@ -1235,18 +1262,25 @@ static void monSaveSlowQueryInfo() {
pos += snprintf(sql + pos, SQL_LENGTH, ", %" PRId64 "", *(int64_t *)row[i]); pos += snprintf(sql + pos, SQL_LENGTH, ", %" PRId64 "", *(int64_t *)row[i]);
} else if (strcmp(fields[i].name, "ep") == 0) { } else if (strcmp(fields[i].name, "ep") == 0) {
charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); charLen = monGetRowElemCharLen(fields[i], (char *)row[i]);
if (charLen < 0) {
monError("failed to save slow_query info, reason: invalid row %s len, sql:%s", (char *)row[i], tsMonitor.sql);
goto DONE;
}
pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 1, ", "SQL_STR_FMT, (char *)row[i]); pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 1, ", "SQL_STR_FMT, (char *)row[i]);
} else if (strcmp(fields[i].name, "sql") == 0) { } else if (strcmp(fields[i].name, "sql") == 0) {
charLen = monGetRowElemCharLen(fields[i], (char *)row[i]); charLen = monGetRowElemCharLen(fields[i], (char *)row[i]);
if (charLen < 0) {
monError("failed to save slow_query info, reason: invalid row %s len, sql:%s", (char *)row[i], tsMonitor.sql);
goto DONE;
}
pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 2, ", "SQL_STR_FMT")", (char *)row[i]); pos += snprintf(sql + pos, strlen(SQL_STR_FMT) + charLen + 2, ", "SQL_STR_FMT")", (char *)row[i]);
} }
} }
} }
monDebug("save slow query, sql:%s", sql); monDebug("save slow query, sql:%s", sql);
taos_free_result(result);
if (!has_slowquery) { if (!has_slowquery) {
return; goto DONE;
} }
void *res = taos_query(tsMonitor.conn, tsMonitor.sql); void *res = taos_query(tsMonitor.conn, tsMonitor.sql);
code = taos_errno(res); code = taos_errno(res);
...@@ -1259,6 +1293,9 @@ static void monSaveSlowQueryInfo() { ...@@ -1259,6 +1293,9 @@ static void monSaveSlowQueryInfo() {
monDebug("successfully to save slowquery info, sql:%s", tsMonitor.sql); monDebug("successfully to save slowquery info, sql:%s", tsMonitor.sql);
} }
DONE:
taos_free_result(result);
return;
} }
static void monSaveDisksInfo() { static void monSaveDisksInfo() {
......
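Every early return in the monitor hunks above is rewritten as a goto DONE so that taos_free_result runs on all exit paths, fixing result-set leaks on the error branches. A minimal sketch of the single-exit idiom; save_info and var_char_len are illustrative names, not from the source:

#include <string.h>
#include <taos.h>

/* Stand-in for monGetRowElemCharLen: -1 signals a non-var-length column. */
static int var_char_len(TAOS_FIELD field, const char *elem) {
    if (field.type != TSDB_DATA_TYPE_BINARY && field.type != TSDB_DATA_TYPE_NCHAR)
        return -1;
    return (int)strlen(elem);
}

static void save_info(TAOS *conn) {
    TAOS_RES *result = taos_query(conn, "show vgroups");
    if (result == NULL) return;
    if (taos_errno(result) != 0) goto DONE;
    TAOS_FIELD *fields = taos_fetch_fields(result);
    TAOS_ROW row;
    while ((row = taos_fetch_row(result)) != NULL) {
        int len = var_char_len(fields[0], (const char *)row[0]);
        if (len < 0) goto DONE;   /* invalid row: clean up instead of leaking */
        /* ... append the value to the insert statement being built ... */
    }
DONE:
    taos_free_result(result);     /* runs on success and on every error path */
}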
...@@ -1942,7 +1942,7 @@ static SQLFunctionCtx* createSQLFunctionCtx(SQueryRuntimeEnv* pRuntimeEnv, SExpr ...@@ -1942,7 +1942,7 @@ static SQLFunctionCtx* createSQLFunctionCtx(SQueryRuntimeEnv* pRuntimeEnv, SExpr
for (int32_t j = 0; j < pCtx->numOfParams; ++j) { for (int32_t j = 0; j < pCtx->numOfParams; ++j) {
int16_t type = pSqlExpr->param[j].nType; int16_t type = pSqlExpr->param[j].nType;
int16_t bytes = pSqlExpr->param[j].nLen; int16_t bytes = pSqlExpr->param[j].nLen;
if (pSqlExpr->functionId == TSDB_FUNC_STDDEV_DST) { if (pSqlExpr->functionId == TSDB_FUNC_STDDEV_DST || pSqlExpr->functionId == TSDB_FUNC_TS_COMP) {
continue; continue;
} }
......
...@@ -1040,7 +1040,10 @@ void tColModelDisplay(SColumnModel *pModel, void *pData, int32_t numOfRows, int3 ...@@ -1040,7 +1040,10 @@ void tColModelDisplay(SColumnModel *pModel, void *pData, int32_t numOfRows, int3
break; break;
case TSDB_DATA_TYPE_NCHAR: { case TSDB_DATA_TYPE_NCHAR: {
char buf[4096] = {0}; char buf[4096] = {0};
taosUcs4ToMbs(val, pModel->pFields[j].field.bytes, buf); int32_t len = taosUcs4ToMbs(val, pModel->pFields[j].field.bytes, buf);
if (len < 0){
qError("castConvert1 taosUcs4ToMbs error");
}
printf("%s\t", buf); printf("%s\t", buf);
break; break;
} }
...@@ -1092,7 +1095,10 @@ void tColModelDisplayEx(SColumnModel *pModel, void *pData, int32_t numOfRows, in ...@@ -1092,7 +1095,10 @@ void tColModelDisplayEx(SColumnModel *pModel, void *pData, int32_t numOfRows, in
break; break;
case TSDB_DATA_TYPE_NCHAR: { case TSDB_DATA_TYPE_NCHAR: {
char buf[128] = {0}; char buf[128] = {0};
taosUcs4ToMbs(val, pModel->pFields[j].field.bytes, buf); int32_t len = taosUcs4ToMbs(val, pModel->pFields[j].field.bytes, buf);
if (len < 0){
qError("castConvert1 taosUcs4ToMbs error");
}
printf("%s\t", buf); printf("%s\t", buf);
break; break;
} }
......
...@@ -1899,12 +1899,20 @@ int32_t filterInitValFieldData(SFilterInfo *info) { ...@@ -1899,12 +1899,20 @@ int32_t filterInitValFieldData(SFilterInfo *info) {
(unit->compare.optr == TSDB_RELATION_MATCH || unit->compare.optr == TSDB_RELATION_NMATCH)){ (unit->compare.optr == TSDB_RELATION_MATCH || unit->compare.optr == TSDB_RELATION_NMATCH)){
char newValData[TSDB_REGEX_STRING_DEFAULT_LEN * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE] = {0}; char newValData[TSDB_REGEX_STRING_DEFAULT_LEN * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE] = {0};
int32_t len = taosUcs4ToMbs(varDataVal(fi->data), varDataLen(fi->data), varDataVal(newValData)); int32_t len = taosUcs4ToMbs(varDataVal(fi->data), varDataLen(fi->data), varDataVal(newValData));
if (len < 0){
qError("filterInitValFieldData taosUcs4ToMbs error 1");
return TSDB_CODE_FAILED;
}
varDataSetLen(newValData, len); varDataSetLen(newValData, len);
varDataCopy(fi->data, newValData); varDataCopy(fi->data, newValData);
}else if(type == TSDB_DATA_TYPE_JSON && }else if(type == TSDB_DATA_TYPE_JSON &&
(unit->compare.optr == TSDB_RELATION_MATCH || unit->compare.optr == TSDB_RELATION_NMATCH)){ (unit->compare.optr == TSDB_RELATION_MATCH || unit->compare.optr == TSDB_RELATION_NMATCH)){
char newValData[TSDB_REGEX_STRING_DEFAULT_LEN * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE] = {0}; char newValData[TSDB_REGEX_STRING_DEFAULT_LEN * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE] = {0};
int32_t len = taosUcs4ToMbs(((tVariant*)(fi->desc))->pz, ((tVariant*)(fi->desc))->nLen, newValData); int32_t len = taosUcs4ToMbs(((tVariant*)(fi->desc))->pz, ((tVariant*)(fi->desc))->nLen, newValData);
if (len < 0){
qError("filterInitValFieldData taosUcs4ToMbs error 2");
return TSDB_CODE_FAILED;
}
memcpy(((tVariant*)(fi->desc))->pz, newValData, len); memcpy(((tVariant*)(fi->desc))->pz, newValData, len);
((tVariant*)(fi->desc))->nLen = len; ((tVariant*)(fi->desc))->nLen = len;
} }
...@@ -3025,6 +3033,11 @@ static void doJsonCompare(SFilterComUnit *cunit, int8_t *result, void* colData){ ...@@ -3025,6 +3033,11 @@ static void doJsonCompare(SFilterComUnit *cunit, int8_t *result, void* colData){
}else{ }else{
char *newColData = calloc(cunit->dataSize * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE, 1); char *newColData = calloc(cunit->dataSize * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE, 1);
int len = taosUcs4ToMbs(varDataVal(realData), varDataLen(realData), varDataVal(newColData)); int len = taosUcs4ToMbs(varDataVal(realData), varDataLen(realData), varDataVal(newColData));
if (len < 0){
qError("castConvert1 taosUcs4ToMbs error");
tfree(newColData);
return;
}
varDataSetLen(newColData, len); varDataSetLen(newColData, len);
tVariant* val = cunit->valData; tVariant* val = cunit->valData;
char newValData[TSDB_REGEX_STRING_DEFAULT_LEN * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE] = {0}; char newValData[TSDB_REGEX_STRING_DEFAULT_LEN * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE] = {0};
...@@ -3113,9 +3126,13 @@ bool filterExecuteImplMisc(void *pinfo, int32_t numOfRows, int8_t** p, SDataStat ...@@ -3113,9 +3126,13 @@ bool filterExecuteImplMisc(void *pinfo, int32_t numOfRows, int8_t** p, SDataStat
if(info->cunits[uidx].dataType == TSDB_DATA_TYPE_NCHAR && (info->cunits[uidx].optr == TSDB_RELATION_MATCH || info->cunits[uidx].optr == TSDB_RELATION_NMATCH)){ if(info->cunits[uidx].dataType == TSDB_DATA_TYPE_NCHAR && (info->cunits[uidx].optr == TSDB_RELATION_MATCH || info->cunits[uidx].optr == TSDB_RELATION_NMATCH)){
char *newColData = calloc(info->cunits[uidx].dataSize * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE, 1); char *newColData = calloc(info->cunits[uidx].dataSize * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE, 1);
int len = taosUcs4ToMbs(varDataVal(colData), varDataLen(colData), varDataVal(newColData)); int32_t len = taosUcs4ToMbs(varDataVal(colData), varDataLen(colData), varDataVal(newColData));
varDataSetLen(newColData, len); if (len < 0){
(*p)[i] = filterDoCompare(gDataCompare[info->cunits[uidx].func], info->cunits[uidx].optr, newColData, info->cunits[uidx].valData); qError("filterExecuteImplMisc taosUcs4ToMbs error");
}else{
varDataSetLen(newColData, len);
(*p)[i] = filterDoCompare(gDataCompare[info->cunits[uidx].func], info->cunits[uidx].optr, newColData, info->cunits[uidx].valData);
}
tfree(newColData); tfree(newColData);
}else if(info->cunits[uidx].dataType == TSDB_DATA_TYPE_JSON){ }else if(info->cunits[uidx].dataType == TSDB_DATA_TYPE_JSON){
doJsonCompare(&(info->cunits[uidx]), &(*p)[i], colData); doJsonCompare(&(info->cunits[uidx]), &(*p)[i], colData);
...@@ -3170,9 +3187,13 @@ bool filterExecuteImpl(void *pinfo, int32_t numOfRows, int8_t** p, SDataStatis * ...@@ -3170,9 +3187,13 @@ bool filterExecuteImpl(void *pinfo, int32_t numOfRows, int8_t** p, SDataStatis *
} else { } else {
if(cunit->dataType == TSDB_DATA_TYPE_NCHAR && (cunit->optr == TSDB_RELATION_MATCH || cunit->optr == TSDB_RELATION_NMATCH)){ if(cunit->dataType == TSDB_DATA_TYPE_NCHAR && (cunit->optr == TSDB_RELATION_MATCH || cunit->optr == TSDB_RELATION_NMATCH)){
char *newColData = calloc(cunit->dataSize * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE, 1); char *newColData = calloc(cunit->dataSize * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE, 1);
int len = taosUcs4ToMbs(varDataVal(colData), varDataLen(colData), varDataVal(newColData)); int32_t len = taosUcs4ToMbs(varDataVal(colData), varDataLen(colData), varDataVal(newColData));
varDataSetLen(newColData, len); if (len < 0){
(*p)[i] = filterDoCompare(gDataCompare[cunit->func], cunit->optr, newColData, cunit->valData); qError("filterExecuteImpl taosUcs4ToMbs error");
}else{
varDataSetLen(newColData, len);
(*p)[i] = filterDoCompare(gDataCompare[cunit->func], cunit->optr, newColData, cunit->valData);
}
tfree(newColData); tfree(newColData);
}else if(cunit->dataType == TSDB_DATA_TYPE_JSON){ }else if(cunit->dataType == TSDB_DATA_TYPE_JSON){
doJsonCompare(cunit, &(*p)[i], colData); doJsonCompare(cunit, &(*p)[i], colData);
...@@ -3577,7 +3598,11 @@ int32_t filterConverNcharColumns(SFilterInfo* info, int32_t rows, bool *gotNchar ...@@ -3577,7 +3598,11 @@ int32_t filterConverNcharColumns(SFilterInfo* info, int32_t rows, bool *gotNchar
char *src = FILTER_GET_COL_FIELD_DATA(fi, j); char *src = FILTER_GET_COL_FIELD_DATA(fi, j);
char *dst = FILTER_GET_COL_FIELD_DATA(&nfi, j); char *dst = FILTER_GET_COL_FIELD_DATA(&nfi, j);
int32_t len = 0; int32_t len = 0;
taosMbsToUcs4(varDataVal(src), varDataLen(src), varDataVal(dst), bufSize, &len); bool ret = taosMbsToUcs4(varDataVal(src), varDataLen(src), varDataVal(dst), bufSize, &len);
if(!ret) {
qError("filterConverNcharColumns taosMbsToUcs4 error");
return TSDB_CODE_FAILED;
}
varDataLen(dst) = len; varDataLen(dst) = len;
} }
......
...@@ -1463,6 +1463,7 @@ static void *tsdbDecodeTable(void *buf, STable **pRTable) { ...@@ -1463,6 +1463,7 @@ static void *tsdbDecodeTable(void *buf, STable **pRTable) {
tsdbFreeTable(pTable); tsdbFreeTable(pTable);
return NULL; return NULL;
} }
taosHashSetFreeFp(pTable->jsonKeyMap, taosArrayDestroyForHash);
}else{ }else{
pTable->pIndex = tSkipListCreate(TSDB_SUPER_TABLE_SL_LEVEL, colType(pCol), (uint8_t)(colBytes(pCol)), NULL, pTable->pIndex = tSkipListCreate(TSDB_SUPER_TABLE_SL_LEVEL, colType(pCol), (uint8_t)(colBytes(pCol)), NULL,
SL_ALLOW_DUP_KEY, getTagIndexKey); SL_ALLOW_DUP_KEY, getTagIndexKey);
......
...@@ -4243,20 +4243,28 @@ char* parseTagDatatoJson(void *p){ ...@@ -4243,20 +4243,28 @@ char* parseTagDatatoJson(void *p){
} }
cJSON_AddItemToObject(json, tagJsonKey, value); cJSON_AddItemToObject(json, tagJsonKey, value);
}else if(type == TSDB_DATA_TYPE_NCHAR) { }else if(type == TSDB_DATA_TYPE_NCHAR) {
char *tagJsonValue = calloc(varDataLen(realData), 1); cJSON* value = NULL;
int32_t length = taosUcs4ToMbs(varDataVal(realData), varDataLen(realData), tagJsonValue); if (varDataLen(realData) > 0){
if (length < 0) { char *tagJsonValue = calloc(varDataLen(realData), 1);
tsdbError("charset:%s to %s. val:%s convert json value failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, int32_t length = taosUcs4ToMbs(varDataVal(realData), varDataLen(realData), tagJsonValue);
(char*)val); if (length < 0) {
tsdbError("charset:%s to %s. val:%s convert json value failed.", DEFAULT_UNICODE_ENCODEC, tsCharset,
(char*)val);
free(tagJsonValue);
goto end;
}
value = cJSON_CreateString(tagJsonValue);
free(tagJsonValue); free(tagJsonValue);
goto end; if (value == NULL)
} {
cJSON* value = cJSON_CreateString(tagJsonValue); goto end;
free(tagJsonValue); }
if (value == NULL) }else if(varDataLen(realData) == 0){
{ value = cJSON_CreateString("");
goto end; }else{
assert(0);
} }
cJSON_AddItemToObject(json, tagJsonKey, value); cJSON_AddItemToObject(json, tagJsonKey, value);
}else if(type == TSDB_DATA_TYPE_DOUBLE){ }else if(type == TSDB_DATA_TYPE_DOUBLE){
double jsonVd = *(double*)(realData); double jsonVd = *(double*)(realData);
......
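The two-column interleaving makes the rewritten NCHAR branch above hard to follow; reconstructed, the new control flow reads as below (a sketch only: json, tagJsonKey, realData, val, and the end label are declared elsewhere in parseTagDatatoJson):

cJSON *value = NULL;
if (varDataLen(realData) > 0) {
    char *tagJsonValue = calloc(varDataLen(realData), 1);
    int32_t length = taosUcs4ToMbs(varDataVal(realData), varDataLen(realData), tagJsonValue);
    if (length < 0) {                       /* charset conversion failed: log and bail out */
        tsdbError("charset:%s to %s. val:%s convert json value failed.",
                  DEFAULT_UNICODE_ENCODEC, tsCharset, (char *)val);
        free(tagJsonValue);
        goto end;
    }
    value = cJSON_CreateString(tagJsonValue);
    free(tagJsonValue);
    if (value == NULL) goto end;
} else if (varDataLen(realData) == 0) {     /* empty tag value becomes "" */
    value = cJSON_CreateString("");
} else {
    assert(0);                              /* negative length: corrupt var-string */
}
cJSON_AddItemToObject(json, tagJsonKey, value);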
...@@ -282,7 +282,6 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TDB_MESSED_MSG, "TSDB messed message") ...@@ -282,7 +282,6 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TDB_MESSED_MSG, "TSDB messed message")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_IVLD_TAG_VAL, "TSDB invalid tag value") TAOS_DEFINE_ERROR(TSDB_CODE_TDB_IVLD_TAG_VAL, "TSDB invalid tag value")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_CACHE_LAST_ROW, "TSDB no cache last row data") TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_CACHE_LAST_ROW, "TSDB no cache last row data")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INCOMPLETE_DFILESET, "Incomplete DFileSet") TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INCOMPLETE_DFILESET, "Incomplete DFileSet")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_JSON_TAG_KEY, "TSDB no tag json key")
// query // query
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_INVALID_QHANDLE, "Invalid handle") TAOS_DEFINE_ERROR(TSDB_CODE_QRY_INVALID_QHANDLE, "Invalid handle")
......
#!/bin/bash
function stopTaosd {
echo "Stop taosd"
sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail'
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
}
stopTaosd
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
nohup taosd -c /etc/taos/ > /dev/null 2>&1 &
sleep 10
cd ../../
WKC=`pwd`
cd ${WKC}/src/connector/C#
dotnet test
dotnet run --project src/test/Cases/Cases.csproj
cd ${WKC}/tests/examples/C#
dotnet run --project C#checker/C#checker.csproj
dotnet run --project TDengineTest/TDengineTest.csproj
dotnet run --project schemaless/schemaless.csproj
cd ${WKC}/tests/examples/C#/taosdemo
dotnet build -c Release
tree || true
./bin/Release/net5.0/taosdemo -c /etc/taos -y
#!/bin/bash
function stopTaosd {
echo "Stop taosd"
sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail'
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
}
stopTaosd
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
nohup taosd -c /etc/taos/ > /dev/null 2>&1 &
sleep 10
cd ../../
WKC=`pwd`
#!/bin/bash
function stopTaosd {
echo "Stop taosd"
sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail'
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
}
stopTaosd
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
nohup taosd -c /etc/taos/ > /dev/null 2>&1 &
sleep 10
#!/bin/bash
function stopTaosd {
echo "Stop taosd"
sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail'
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
}
stopTaosd
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
nohup taosd -c /etc/taos/ > /dev/null 2>&1 &
sleep 10
cd ../../
WKC=`pwd`
cd ${WKC}/src/connector/nodejs
npm install
npm run test
cd ${WKC}/tests/examples/nodejs
npm install td2.0-connector > /dev/null 2>&1
node nodejsChecker.js host=localhost
node test1970.js
cd ${WKC}/tests/connectorTest/nodejsTest/nanosupport
npm install td2.0-connector > /dev/null 2>&1
node nanosecondTest.js
#!/bin/bash
function stopTaosd {
echo "Stop taosd"
sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail'
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
}
stopTaosd
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
nohup taosd -c /etc/taos/ > /dev/null 2>&1 &
sleep 10
cd ../../src/connector/python
pip3 install pytest
pytest tests/
python3 examples/bind-multi.py
python3 examples/bind-row.py
python3 examples/demo.py
python3 examples/insert-lines.py
python3 examples/pep-249.py
python3 examples/query-async.py
python3 examples/query-objectively.py
python3 examples/subscribe-sync.py
python3 examples/subscribe-async.py
#!/bin/bash
function stopTaosd {
echo "Stop taosd"
sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail'
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
}
stopTaosd
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
nohup taosd -c /etc/taos/ > /dev/null 2>&1 &
sleep 10
cd ../../
WKC=`pwd`
#!/bin/bash
function stopTaosd {
echo "Stop taosd"
sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail'
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
}
stopTaosd
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
nohup taosd -c /etc/taos/ > /dev/null 2>&1 &
sleep 10
cd ../../
WKC=`pwd`
\ No newline at end of file
bash 3-connectors/c#/test.sh
bash 3-connectors/go/test.sh
bash 3-connectors/java/test.sh
bash 3-connectors/nodejs/test.sh
bash 3-connectors/python/test.sh
bash 3-connectors/restful/test.sh
bash 3-connectors/rust/test.sh
python3 ./test.py -f 1-insert/batchInsert.py python3 ./test.py -f 1-insert/batchInsert.py
\ No newline at end of file
python3 ./test.py -f 0-others/json_tag.py python3 ./test.py -f 0-others/json_tag.py
\ No newline at end of file
python3 ./test.py -f 2-query/ts_hidden_column.py python3 ./test.py -f 2-query/ts_hidden_column.py
python3 ./test.py -f 2-query/union-order.py python3 ./test.py -f 2-query/union-order.py
python3 ./test.py -f 2-query/session_two_stage.py python3 ./test.py -f 2-query/session_two_stage.py
python3 ./test.py -f 5-taos-tools/taosdump/taosdumpTestTypeJson.py python3 ./test.py -f 5-taos-tools/taosdump/taosdumpTestTypeJson.py
\ No newline at end of file
#!/bin/bash #!/bin/bash
ulimit -c unlimited ulimit -c unlimited
#======================p1-start=============== \ No newline at end of file
# restful test for python
# python3 test.py -f restful/restful_bind_db1.py
# python3 test.py -f restful/restful_bind_db2.py
python3 ./test.py -f client/nettest.py
python3 ./test.py -f ../system-test/4-taosAdapter/taosAdapter_query.py
python3 ./test.py -f ../system-test/4-taosAdapter/taosAdapter_insert.py
#======================p1-end===============
...@@ -4,7 +4,6 @@ ulimit -c unlimited ...@@ -4,7 +4,6 @@ ulimit -c unlimited
python3 testCompress.py python3 testCompress.py
python3 testNoCompress.py python3 testNoCompress.py
python3 ./test.py -f import_merge/importBlock1HO.py python3 ./test.py -f import_merge/importBlock1HO.py
python3 ./test.py -f import_merge/importBlock1HPO.py python3 ./test.py -f import_merge/importBlock1HPO.py
python3 ./test.py -f import_merge/importBlock1H.py python3 ./test.py -f import_merge/importBlock1H.py
...@@ -23,10 +22,6 @@ python3 ./test.py -f import_merge/importBlock2TPO.py ...@@ -23,10 +22,6 @@ python3 ./test.py -f import_merge/importBlock2TPO.py
python3 ./test.py -f import_merge/importBlock2T.py python3 ./test.py -f import_merge/importBlock2T.py
python3 ./test.py -f import_merge/importBlockbetween.py python3 ./test.py -f import_merge/importBlockbetween.py
python3 ./test.py -f import_merge/importCacheFileHO.py python3 ./test.py -f import_merge/importCacheFileHO.py
#======================p1-end===============
#======================p2-start===============
python3 ./test.py -f import_merge/importCacheFileHPO.py python3 ./test.py -f import_merge/importCacheFileHPO.py
python3 ./test.py -f import_merge/importCacheFileH.py python3 ./test.py -f import_merge/importCacheFileH.py
python3 ./test.py -f import_merge/importCacheFileS.py python3 ./test.py -f import_merge/importCacheFileS.py
...@@ -48,10 +43,6 @@ python3 ./test.py -f import_merge/importDataLastTPO.py ...@@ -48,10 +43,6 @@ python3 ./test.py -f import_merge/importDataLastTPO.py
python3 ./test.py -f import_merge/importDataLastT.py python3 ./test.py -f import_merge/importDataLastT.py
python3 ./test.py -f import_merge/importDataS.py python3 ./test.py -f import_merge/importDataS.py
python3 ./test.py -f import_merge/importDataSub.py python3 ./test.py -f import_merge/importDataSub.py
#======================p2-end===============
#======================p3-start===============
python3 ./test.py -f import_merge/importDataTO.py python3 ./test.py -f import_merge/importDataTO.py
python3 ./test.py -f import_merge/importDataTPO.py python3 ./test.py -f import_merge/importDataTPO.py
python3 ./test.py -f import_merge/importDataT.py python3 ./test.py -f import_merge/importDataT.py
...@@ -73,10 +64,6 @@ python3 ./test.py -f import_merge/importSpan.py ...@@ -73,10 +64,6 @@ python3 ./test.py -f import_merge/importSpan.py
python3 ./test.py -f import_merge/importSRestart.py python3 ./test.py -f import_merge/importSRestart.py
python3 ./test.py -f import_merge/importSubRestart.py python3 ./test.py -f import_merge/importSubRestart.py
python3 ./test.py -f import_merge/importTailOverlap.py python3 ./test.py -f import_merge/importTailOverlap.py
#======================p3-end===============
#======================p4-start===============
python3 ./test.py -f import_merge/importTailPartOverlap.py python3 ./test.py -f import_merge/importTailPartOverlap.py
python3 ./test.py -f import_merge/importTail.py python3 ./test.py -f import_merge/importTail.py
python3 ./test.py -f import_merge/importToCommit.py python3 ./test.py -f import_merge/importToCommit.py
...@@ -88,7 +75,6 @@ python3 ./test.py -f import_merge/importCSV.py ...@@ -88,7 +75,6 @@ python3 ./test.py -f import_merge/importCSV.py
python3 ./test.py -f import_merge/import_update_0.py python3 ./test.py -f import_merge/import_update_0.py
python3 ./test.py -f import_merge/import_update_1.py python3 ./test.py -f import_merge/import_update_1.py
python3 ./test.py -f import_merge/import_update_2.py python3 ./test.py -f import_merge/import_update_2.py
python3 ./test.py -f insert/basic.py python3 ./test.py -f insert/basic.py
python3 ./test.py -f insert/int.py python3 ./test.py -f insert/int.py
python3 ./test.py -f insert/float.py python3 ./test.py -f insert/float.py
...@@ -98,8 +84,6 @@ python3 ./test.py -f insert/double.py ...@@ -98,8 +84,6 @@ python3 ./test.py -f insert/double.py
python3 ./test.py -f insert/smallint.py python3 ./test.py -f insert/smallint.py
python3 ./test.py -f insert/tinyint.py python3 ./test.py -f insert/tinyint.py
python3 ./test.py -f insert/date.py python3 ./test.py -f insert/date.py
python3 ./test.py -f insert/binary.py python3 ./test.py -f insert/binary.py
python3 ./test.py -f insert/nchar.py python3 ./test.py -f insert/nchar.py
#python3 ./test.py -f insert/nchar-boundary.py #python3 ./test.py -f insert/nchar-boundary.py
...@@ -133,41 +117,21 @@ python3 ./test.py -f insert/verifyMemToDiskCrash.py ...@@ -133,41 +117,21 @@ python3 ./test.py -f insert/verifyMemToDiskCrash.py
#python3 ./test.py -f insert/schemalessInsert.py #python3 ./test.py -f insert/schemalessInsert.py
#python3 ./test.py -f insert/openTsdbJsonInsert.py #python3 ./test.py -f insert/openTsdbJsonInsert.py
python3 ./test.py -f insert/openTsdbTelnetLinesInsert.py python3 ./test.py -f insert/openTsdbTelnetLinesInsert.py
# update
python3 ./test.py -f update/merge_commit_data.py python3 ./test.py -f update/merge_commit_data.py
python3 ./test.py -f update/allow_update.py python3 ./test.py -f update/allow_update.py
python3 ./test.py -f update/allow_update-0.py python3 ./test.py -f update/allow_update-0.py
python3 ./test.py -f update/append_commit_data.py python3 ./test.py -f update/append_commit_data.py
python3 ./test.py -f update/append_commit_last-0.py python3 ./test.py -f update/append_commit_last-0.py
python3 ./test.py -f update/append_commit_last.py python3 ./test.py -f update/append_commit_last.py
python3 ./test.py -f update/merge_commit_data2.py python3 ./test.py -f update/merge_commit_data2.py
python3 ./test.py -f update/merge_commit_data2_update0.py python3 ./test.py -f update/merge_commit_data2_update0.py
python3 ./test.py -f update/merge_commit_last-0.py python3 ./test.py -f update/merge_commit_last-0.py
python3 ./test.py -f update/merge_commit_last.py python3 ./test.py -f update/merge_commit_last.py
python3 ./test.py -f update/update_options.py python3 ./test.py -f update/update_options.py
python3 ./test.py -f update/merge_commit_data-0.py python3 ./test.py -f update/merge_commit_data-0.py
# wal
python3 ./test.py -f wal/addOldWalTest.py python3 ./test.py -f wal/addOldWalTest.py
python3 ./test.py -f wal/sdbComp.py python3 ./test.py -f wal/sdbComp.py
#======================p4-end===============
#======================p5-start===============
python3 ./test.py -f ../system-test/1-insert/0-sql/basic.py
python3 ./test.py -f ../develop-test/1-insert/0-sql/basic.py
python3 ./test.py -f ../develop-test/1-insert/0-sql/batchInsert.py
#======================p5-end===============
......
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
import time
import random
import datetime
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
# def assertCheck(self, filename, queryResult, expectResult):
# self.filename = filename
# self.queryResult = queryResult
# self.expectResult = expectResult
# args0 = (filename, queryResult, expectResult)
# assert queryResult == expectResult, "Queryfile:%s ,result is %s != expect: %s" % args0
def assertfileDataExport(self, filename, expectResult):
self.filename = filename
self.expectResult = expectResult
with open("%s" % filename, 'r+') as f1:
for line in f1.readlines():
queryResultTaosc = line.strip().split(',')[0]
# self.assertCheck(filename, queryResultTaosc, expectResult)
def run(self):
starttime = 1537146000000
tdSql.prepare()
tdSql.execute("drop database if exists db_json;")
print("==============step1 tag format =======")
tdLog.info("create database db_jsonB ")
tdSql.execute("create database db_jsonB")
tdSql.execute("use db_jsonB")
# test Benchmark
tdSql.execute("create table if not exists jsons1(ts timestamp,dataFloat float) tags(jtag json)")
for numTables in range(500):
json = "{\"loc1%d\":\"beijingandshanghaiandchangzhouandshijiazhuanganda%d\",\"loc2%d\":\"beijingandshanghaiandchangzhouandshijiazhuangandb%d\" ,\"loc3%d\":\"beijingandshanghaiandchangzhouandshijiazhuangandc%d\",\
\"loc4%d\":\"beijingandshanghaiandchangzhouandshijiazhuangandd%d\",\"loc5%d\":\"beijingandshanghaiandchangzhouandshijiazhuangande%d\",\"loc6%d\":\"beijingandshanghaiandchangzhouandshijiazhuangandf%d\",\
\"loc7%d\":\"beijingandshanghaiandchangzhouandshijiazhuangandg%d\"}"% (numTables,numTables,numTables,numTables,numTables,numTables,numTables,numTables,numTables,numTables,numTables,numTables,numTables,numTables)
print(json)
createTableSqls = "create table if not exists jsons1_%d using jsons1 tags('%s')" %(numTables,json)
print(createTableSqls)
tdLog.info(createTableSqls)
tdSql.execute(createTableSqls)
for numRecords in range(1,101):
dataFloatSql=numRecords*10+numRecords*0.01
insertDataSqls = "insert into jsons1_%d values(%d+%ds, %d) " %(numTables,starttime,numRecords,dataFloatSql)
tdLog.info(insertDataSqls)
tdSql.execute(insertDataSqls)
tdSql.execute("use db_jsonB")
now_time1 = datetime.datetime.now()
tdSql.query("select * from jsons1 where ts>1537145900000 and ts<1537156000000;")
spendTimes1 = datetime.datetime.now() - now_time1
print(spendTimes1)
now_time2 = datetime.datetime.now()
tdSql.query("select * from jsons1 where ts>1537156000000;")
spendTimes2 = datetime.datetime.now() - now_time2
print(spendTimes2)
tdSql.execute("drop database db_jsonB")
testcaseFilename = os.path.split(__file__)[-1]
os.system("rm -rf ./insert_res.txt")
os.system("rm -rf tools/taosdemoAllTest/%s.sql" % testcaseFilename )
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
import sys import sys
import os
import taos import taos
from util.log import tdLog from util.log import tdLog
from util.cases import tdCases from util.cases import tdCases
...@@ -25,6 +26,21 @@ class TDTestCase: ...@@ -25,6 +26,21 @@ class TDTestCase:
tdLog.debug("start to execute %s" % __file__) tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql) tdSql.init(conn.cursor(), logSql)
# def assertCheck(self, filename, queryResult, expectResult):
# self.filename = filename
# self.queryResult = queryResult
# self.expectResult = expectResult
# args0 = (filename, queryResult, expectResult)
# assert queryResult == expectResult, "Queryfile:%s ,result is %s != expect: %s" % args0
def assertfileDataExport(self, filename, expectResult):
self.filename = filename
self.expectResult = expectResult
with open("%s" % filename, 'r+') as f1:
for line in f1.readlines():
queryResultTaosc = line.strip().split(',')[0]
# self.assertCheck(filename, queryResultTaosc, expectResult)
def run(self): def run(self):
tdSql.prepare() tdSql.prepare()
tdSql.execute("drop database if exists db_json;") tdSql.execute("drop database if exists db_json;")
...@@ -62,8 +78,18 @@ class TDTestCase: ...@@ -62,8 +78,18 @@ class TDTestCase:
tdSql.error("CREATE TABLE if not exists jsons4_1 using jsons4 tags('{\"%s1\":5}')" % char2) # len(key)=257 tdSql.error("CREATE TABLE if not exists jsons4_1 using jsons4 tags('{\"%s1\":5}')" % char2) # len(key)=257
tdSql.execute("CREATE TABLE if not exists jsons4_2 using jsons4 tags('{\"T\":\"%s\"}')" % char3) # len(object)=4096 tdSql.execute("CREATE TABLE if not exists jsons4_2 using jsons4 tags('{\"T\":\"%s\"}')" % char3) # len(object)=4096
tdSql.error("CREATE TABLE if not exists jsons4_2 using jsons4 tags('{\"TS\":\"%s\"}')" % char3) # len(object)=4097 tdSql.error("CREATE TABLE if not exists jsons4_2 using jsons4 tags('{\"TS\":\"%s\"}')" % char3) # len(object)=4097
# test the min/max range of the double type; int64 is not checked here
tdSql.error("CREATE TABLE if not exists jsons4_3 using jsons4 tags('{\"doublength\":-1.8e308}')")
tdSql.error("CREATE TABLE if not exists jsons4_3 using jsons4 tags('{\"doublength\":1.8e308}')")
tdSql.execute("CREATE TABLE if not exists jsons4_4 using jsons4 tags('{\"doublength\":-1.7e308}')")
tdSql.execute("CREATE TABLE if not exists jsons4_5 using jsons4 tags('{\"doublength\":1.71e308}')")
tdSql.query("select jtag from jsons4 where jtag->'doublength'<-1.69e+308;")
tdSql.checkRows(1)
tdSql.query("select jtag from jsons4 where jtag->'doublength'>1.7e+308;")
tdSql.checkRows(1)
tdSql.execute("insert into jsons1_1 values(now, 1, 'json1')") tdSql.execute("insert into jsons1_1 values(now+2s, 1, 'json1')")
tdSql.execute("insert into jsons1_1 values(now+1s, 1, 'json1')") tdSql.execute("insert into jsons1_1 values(now+1s, 1, 'json1')")
tdSql.execute("insert into jsons1_2 using jsons1 tags('{\"num\":5,\"location\":\"beijing\"}') values (now, 1, 'json2')") tdSql.execute("insert into jsons1_2 using jsons1 tags('{\"num\":5,\"location\":\"beijing\"}') values (now, 1, 'json2')")
tdSql.execute("insert into jsons1_3 using jsons1 tags('{\"num\":34,\"location\":\"beijing\",\"level\":\"l1\"}') values (now, 1, 'json3')") tdSql.execute("insert into jsons1_3 using jsons1 tags('{\"num\":34,\"location\":\"beijing\",\"level\":\"l1\"}') values (now, 1, 'json3')")
...@@ -194,15 +220,12 @@ class TDTestCase: ...@@ -194,15 +220,12 @@ class TDTestCase:
tdSql.checkRows(0) tdSql.checkRows(0)
# # test where condition in # # test where condition in
# tdSql.query("select * from jsons1 where jtag->'location' in ('beijing')") tdSql.error("select * from jsons1 where jtag->'location' in ('beijing')")
# tdSql.checkRows(3) # tdSql.checkRows(3)
tdSql.error("select * from jsons1 where jtag->'num' in (5,34)")
# tdSql.query("select * from jsons1 where jtag->'num' in (5,34)")
# tdSql.checkRows(2) # tdSql.checkRows(2)
tdSql.error("select * from jsons1 where jtag->'num' in ('5',34)")
# tdSql.error("select * from jsons1 where jtag->'num' in ('5',34)") tdSql.error("select * from jsons1 where jtag->'location' in ('beijing') and jtag->'class'=55")
# tdSql.query("select * from jsons1 where jtag->'location' in ('beijing') and jtag->'class'=55")
# tdSql.checkRows(1) # tdSql.checkRows(1)
# test where condition match # test where condition match
...@@ -412,8 +435,25 @@ class TDTestCase: ...@@ -412,8 +435,25 @@ class TDTestCase:
tdSql.query(" select stddev(dataint) from jsons1 where jtag->'location'='beijing';") tdSql.query(" select stddev(dataint) from jsons1 where jtag->'location'='beijing';")
tdSql.checkRows(1) tdSql.checkRows(1)
tdSql.error(" select LEASTSQUARES(dataint,1,2) from jsons1_1 where jtag->'location' ='beijing' ;") tdSql.error(" select LEASTSQUARES(dataint,1,2) from jsons1_1 where jtag->'location' ='beijing' ;")
tdSql.query("select count(jtag) from jsons1 ;")
tdSql.checkData(0, 0, 15)
tdSql.error("select count( jtag->'location'='beijing') from jsons1 ;")
tdSql.error("select count( jtag contains 'age') from jsons1 ;")
functionName = ['avg','twa','irate','stddev', 'stddev', 'leastsquares']
print(functionName)
for fn in functionName:
tdSql.error("select %s( jtag) from jsons1 ;"%fn)
tdSql.error("select %s( jtag->'location'='beijing') from jsons1 ;"%fn)
tdSql.error("select %s( jtag contains 'age') from jsons1 ;"%fn)
# tdSql.error("select avg( jtag) from jsons1 ;")
# tdSql.error("select avg( jtag->'location'='beijing') from jsons1 ;")
# tdSql.error("select avg( jtag contains 'age') from jsons1 ;")
# Select_exprs with SQL selection functions # Select_exprs with SQL selection functions
...@@ -467,6 +507,13 @@ class TDTestCase: ...@@ -467,6 +507,13 @@ class TDTestCase:
tdSql.checkRows(4) tdSql.checkRows(4)
tdSql.checkData(0,1,2) tdSql.checkData(0,1,2)
tdSql.checkData(2,1,4) tdSql.checkData(2,1,4)
# error cases: these functions are not allowed on json tags
functionName = ['min','max','last','TOP','last_row','bottom','apercentile','interp']
print(functionName)
for fn in functionName:
tdSql.error("select %s( jtag) from jsons1 ;"%fn)
tdSql.error("select %s( jtag->'location'='beijing') from jsons1 ;"%fn)
tdSql.error("select %s( jtag contains 'age') from jsons1 ;"%fn)
# Select_exprs with SQL calculation functions # Select_exprs with SQL calculation functions
tdSql.error(" select diff(dataint) from jsons1 where jtag->'location'= 'beijing' or jtag->'location'= 'tianjing'or jtag contains 'num' or jtag->'age'=35 ;") tdSql.error(" select diff(dataint) from jsons1 where jtag->'location'= 'beijing' or jtag->'location'= 'tianjing'or jtag contains 'num' or jtag->'age'=35 ;")
...@@ -500,13 +547,21 @@ class TDTestCase: ...@@ -500,13 +547,21 @@ class TDTestCase:
tdSql.query("select ts,round(dataint),round(datafloat),round(datadouble) from jsons7 where jtag contains 'tea';") tdSql.query("select ts,round(dataint),round(datafloat),round(datadouble) from jsons7 where jtag contains 'tea';")
tdSql.query("select round(dataint),round(datafloat),round(datadouble) from jsons7 where jtag contains 'tea';") tdSql.query("select round(dataint),round(datafloat),round(datadouble) from jsons7 where jtag contains 'tea';")
functionName = ['diff','Derivative','SPREAD','ceil','round','floor']
print(functionName)
for fn in functionName:
tdSql.error("select %s( jtag) from jsons1 ;"%fn)
tdSql.error("select %s( jtag->'location'='beijing') from jsons1 ;"%fn)
tdSql.error("select %s( jtag contains 'age') from jsons1 ;"%fn)
# modify the same key with different data types, including negative double values # modify the same key with different data types, including negative double values
tdSql.execute("insert into jsons7_4 using jsons7 tags('{\"nv\":null,\"tea\":123,\"tag\":123,\"tea\":false}') values (now+1s,5,'true',4.01,2.2,'abc'); ") tdSql.execute("insert into jsons7_4 using jsons7 tags('{\"nv\":null,\"tea\":123,\"tag\":123,\"tea\":false}') values (now+1s,5,'true',4.01,2.2,'abc'); ")
tdSql.execute("insert into jsons7_5 using jsons7 tags('{\"nv\":null,\"tea\":\"app\",\"tag\":123,\"tea\":false}') values (now+2s,5,'true',4.01,2.2,'abc'); ") tdSql.execute("insert into jsons7_5 using jsons7 tags('{\"nv\":null,\"tea\":\"app\",\"tag\":123,\"tea\":false}') values (now+2s,5,'true',4.01,2.2,'abc'); ")
tdSql.error("insert into jsons7_6 using jsons7 tags('{\"nv\":null,\"tea\":-1.111111111111111111111111111111111111111111111111111111111111111111111,\"tag\":123,\"tea\":false}') values (now+3s,5,'true',4.01,2.2,'123'); ") tdSql.error("insert into jsons7_6 using jsons7 tags('{\"nv\":null,\"tea\":-1.111111111111111111111111111111111111111111111111111111111111111111111,\"tag\":123,\"tea\":false}') values (now+3s,5,'true',4.01,2.2,'123'); ")
tdSql.execute("insert into jsons7_6 using jsons7 tags('{\"nv\":null,\"tea\":-1.111111111,\"tag\":123,\"tea\":false}') values (now,5,'false',4.01,2.2,'t123'); ") tdSql.execute("insert into jsons7_6 using jsons7 tags('{\"nv\":null,\"tea\":-1.111111111,\"tag\":123,\"tea\":false}') values (now,5,'false',4.01,2.2,'t123'); ")
tdSql.query("select jtag from jsons7 where jtag->'tea'>-1.01;") tdSql.query("select jtag from jsons7 where jtag->'tea'<-1.01;")
# tdSql.checkRows(2) tdSql.checkRows(1)
# test join # test join
tdSql.execute("create table if not exists jsons6(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50)) tags(jtag json)") tdSql.execute("create table if not exists jsons6(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50)) tags(jtag json)")
...@@ -572,6 +627,36 @@ class TDTestCase: ...@@ -572,6 +627,36 @@ class TDTestCase:
tdSql.query(" select stddev(dataint) from jsons8 group by datatime;") tdSql.query(" select stddev(dataint) from jsons8 group by datatime;")
tdSql.error(" select stddev(datatime) from jsons8 group by datadouble;") tdSql.error(" select stddev(datatime) from jsons8 group by datadouble;")
# # verify the tag length of the super table and the child table
# TD-12389
# tdSql.query("describe jsons1;")
# jtagLengthSup=tdSql.queryResult[3][2]
# tdSql.query("describe jsons1_1;")
# tdSql.checkData(3, 2, jtagLengthSup)
# #test import and export
# tdSql.execute("select * from jsons1 >> jsons1_data.csv;")
# tdSql.query("select * from jsons1 ")
# with open("./jsons1_data.csv", 'r+') as f1:
# # count=len(open("./jsons1_data.csv",'rU').readlines())
# # print(count)
# rows=0
# for line in f1.readlines():
# # for columns in range(4): # this will be replaced with the column length later, but is set to a fixed value for now
# queryResultInt = line.strip().split(',')[1]
# # queryResultTag = line.strip().split(',')[3]
# # for rows in range(9):
# # print(rows,1,queryResultInt,queryResultTag)
# tdSql.checkData(rows, 1, "%s" %queryResultInt)
# # tdSql.checkData(rows, 3, "%s" %queryResultTag)
# rows +=1
# # test taos -f
# os.system("taos -f stable/json_tag_extra.py.sql ")
# tdSql.execute("use db_json")
# tdSql.query("select * from jsons1")
# tdSql.checkRows(9)
# # test drop tables and databases # # test drop tables and databases
# tdSql.execute("drop table jsons1_1") # tdSql.execute("drop table jsons1_1")
...@@ -579,7 +664,10 @@ class TDTestCase: ...@@ -579,7 +664,10 @@ class TDTestCase:
# tdSql.execute("drop stable jsons3") # tdSql.execute("drop stable jsons3")
# tdSql.execute("drop stable jsons2") # tdSql.execute("drop stable jsons2")
# tdSql.execute("drop database db_json") # tdSql.execute("drop database db_json")
testcaseFilename = os.path.split(__file__)[-1]
os.system("rm -rf ./insert_res.txt")
os.system("rm -rf tools/taosdemoAllTest/%s.sql" % testcaseFilename )
def stop(self): def stop(self):
......
...@@ -42,7 +42,7 @@ ...@@ -42,7 +42,7 @@
"batch_create_tbl_num": 20, "batch_create_tbl_num": 20,
"data_source": "rand", "data_source": "rand",
"insert_mode": "taosc", "insert_mode": "taosc",
"insert_rows": 150, "insert_rows": 100,
"childtable_limit": -1, "childtable_limit": -1,
"childtable_offset":0, "childtable_offset":0,
"multi_thread_write_one_tbl": "no", "multi_thread_write_one_tbl": "no",
...@@ -58,6 +58,138 @@ ...@@ -58,6 +58,138 @@
"tags_file": "", "tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":2}, {"type": "nchar", "len": 32, "count":2}], "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":2}, {"type": "nchar", "len": 32, "count":2}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY","count":1}, {"type": "nchar", "count":2}] "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY","count":1}, {"type": "nchar", "count":2}]
},
{
"name": "stb1",
"child_table_exists":"no",
"childtable_count": 20,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "rand",
"insert_mode": "rest",
"insert_rows": 100,
"childtable_limit": -1,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":2}, {"type": "nchar", "len": 32, "count":2}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY","count":1}, {"type": "nchar", "count":2}]
},
{
"name": "stb2",
"child_table_exists":"no",
"childtable_count": 30,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "rand",
"insert_mode": "stmt",
"insert_rows": 100,
"childtable_limit": -1,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":2}, {"type": "nchar", "len": 32, "count":2}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY","count":1}, {"type": "nchar", "count":2}]
},
{
"name": "stb3",
"child_table_exists":"no",
"childtable_count": 40,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "rand",
"insert_mode": "sml",
"insert_rows": 100,
"childtable_limit": -1,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":2}, {"type": "nchar", "len": 32, "count":2}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY","count":1}, {"type": "nchar", "count":2}]
},
{
"name": "stb4",
"child_table_exists":"no",
"childtable_count": 50,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "rand",
"insert_mode": "sml",
"line_protocol": "telnet",
"insert_rows": 100,
"childtable_limit": -1,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}],
"tags": [{"type": "TINYINT", "count":2}]
},
{
"name": "stb5",
"child_table_exists":"no",
"childtable_count": 60,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "rand",
"insert_mode": "sml",
"line_protocol": "json",
"insert_rows": 100,
"childtable_limit": -1,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}],
"tags": [{"type": "TINYINT"}]
}] }]
}] }]
} }
...@@ -85,16 +85,16 @@ ...@@ -85,16 +85,16 @@
"tags": [{"type": "TINYINT", "count":1}] "tags": [{"type": "TINYINT", "count":1}]
}, },
{ {
"name": "stb3", "name": "stb2",
"child_table_exists":"no", "child_table_exists":"no",
"childtable_count": 5, "childtable_count": 3,
"childtable_prefix": "stb03_", "childtable_prefix": "stb03_",
"auto_create_table": "no", "auto_create_table": "no",
"batch_create_tbl_num": 100, "batch_create_tbl_num": 100,
"data_source": "rand", "data_source": "rand",
"insert_mode": "sml", "insert_mode": "sml",
"line_protocol": "json" , "line_protocol": "json" ,
"insert_rows":50, "insert_rows":10,
"childtable_limit": -1, "childtable_limit": -1,
"childtable_offset":0, "childtable_offset":0,
"interlace_rows": 32767, "interlace_rows": 32767,
...@@ -111,16 +111,16 @@ ...@@ -111,16 +111,16 @@
"tags": [{"type": "BIGINT", "count":1}] "tags": [{"type": "BIGINT", "count":1}]
}, },
{ {
"name": "stb4", "name": "stb3",
"child_table_exists":"no", "child_table_exists":"no",
"childtable_count": 20, "childtable_count": 4,
"childtable_prefix": "stb04_", "childtable_prefix": "stb04_",
"auto_create_table": "no", "auto_create_table": "no",
"batch_create_tbl_num": 100, "batch_create_tbl_num": 100,
"data_source": "rand", "data_source": "rand",
"insert_mode": "sml", "insert_mode": "sml",
"line_protocol": "json" , "line_protocol": "json" ,
"insert_rows":50, "insert_rows":10,
"childtable_limit": -1, "childtable_limit": -1,
"childtable_offset":0, "childtable_offset":0,
"interlace_rows": 32767, "interlace_rows": 32767,
...@@ -137,16 +137,16 @@ ...@@ -137,16 +137,16 @@
"tags": [{"type": "SMALLINT", "count":1}] "tags": [{"type": "SMALLINT", "count":1}]
}, },
{ {
"name": "stb5", "name": "stb4",
"child_table_exists":"no", "child_table_exists":"no",
"childtable_count": 40, "childtable_count": 5,
"childtable_prefix": "stb05_", "childtable_prefix": "stb05_",
"auto_create_table": "no", "auto_create_table": "no",
"batch_create_tbl_num": 100, "batch_create_tbl_num": 100,
"data_source": "rand", "data_source": "rand",
"insert_mode": "sml", "insert_mode": "sml",
"line_protocol": "json" , "line_protocol": "json" ,
"insert_rows":50, "insert_rows":10,
"childtable_limit": -1, "childtable_limit": -1,
"childtable_offset":0, "childtable_offset":0,
"interlace_rows": 32767, "interlace_rows": 32767,
...@@ -163,16 +163,16 @@ ...@@ -163,16 +163,16 @@
"tags": [{"type": "FLOAT", "count":1}] "tags": [{"type": "FLOAT", "count":1}]
}, },
{ {
"name": "stb6", "name": "stb5",
"child_table_exists":"no", "child_table_exists":"no",
"childtable_count": 15, "childtable_count": 6,
"childtable_prefix": "stb06_", "childtable_prefix": "stb06_",
"auto_create_table": "no", "auto_create_table": "no",
"batch_create_tbl_num": 100, "batch_create_tbl_num": 100,
"data_source": "rand", "data_source": "rand",
"insert_mode": "sml", "insert_mode": "sml",
"line_protocol": "json" , "line_protocol": "json" ,
"insert_rows":50, "insert_rows":10,
"childtable_limit": -1, "childtable_limit": -1,
"childtable_offset":0, "childtable_offset":0,
"interlace_rows": 32767, "interlace_rows": 32767,
...@@ -188,17 +188,43 @@ ...@@ -188,17 +188,43 @@
"columns": [{"type": "DOUBLE"}], "columns": [{"type": "DOUBLE"}],
"tags": [{"type": "DOUBLE", "count":1}] "tags": [{"type": "DOUBLE", "count":1}]
}, },
{
"name": "stb6",
"child_table_exists":"no",
"childtable_count": 7,
"childtable_prefix": "stb01_",
"auto_create_table": "no",
"batch_create_tbl_num": 100,
"data_source": "rand",
"insert_mode": "sml",
"line_protocol": "telnet" ,
"insert_rows":10,
"childtable_limit": -1,
"childtable_offset":0,
"interlace_rows": 32767,
"insert_interval":0,
"max_sql_len": 1025000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2012-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "UINT"}],
"tags": [{"type": "UINT", "count":1}]
},
{ {
"name": "stb7", "name": "stb7",
"child_table_exists":"no", "child_table_exists":"no",
"childtable_count": 10, "childtable_count": 8,
"childtable_prefix": "stb07_", "childtable_prefix": "stb07_",
"auto_create_table": "no", "auto_create_table": "no",
"batch_create_tbl_num": 100, "batch_create_tbl_num": 100,
"data_source": "rand", "data_source": "rand",
"insert_mode": "sml", "insert_mode": "sml",
"line_protocol": "json" , "line_protocol": "json" ,
"insert_rows":50, "insert_rows":10,
"childtable_limit": -1, "childtable_limit": -1,
"childtable_offset":0, "childtable_offset":0,
"interlace_rows": 32767, "interlace_rows": 32767,
...@@ -217,14 +243,14 @@ ...@@ -217,14 +243,14 @@
{ {
"name": "stb8", "name": "stb8",
"child_table_exists":"no", "child_table_exists":"no",
"childtable_count": 20, "childtable_count": 9,
"childtable_prefix": "stb08_", "childtable_prefix": "stb08_",
"auto_create_table": "no", "auto_create_table": "no",
"batch_create_tbl_num": 100, "batch_create_tbl_num": 100,
"data_source": "rand", "data_source": "rand",
"insert_mode": "sml", "insert_mode": "sml",
"line_protocol": "json" , "line_protocol": "json" ,
"insert_rows":50, "insert_rows":10,
"childtable_limit": -1, "childtable_limit": -1,
"childtable_offset":0, "childtable_offset":0,
"interlace_rows": 32767, "interlace_rows": 32767,
...@@ -243,14 +269,14 @@ ...@@ -243,14 +269,14 @@
{ {
"name": "stb9", "name": "stb9",
"child_table_exists":"no", "child_table_exists":"no",
"childtable_count": 3, "childtable_count": 10,
"childtable_prefix": "stb09_", "childtable_prefix": "stb09_",
"auto_create_table": "no", "auto_create_table": "no",
"batch_create_tbl_num": 100, "batch_create_tbl_num": 100,
"data_source": "rand", "data_source": "rand",
"insert_mode": "sml", "insert_mode": "sml",
"line_protocol": "json" , "line_protocol": "json" ,
"insert_rows":50, "insert_rows":10,
"childtable_limit": -1, "childtable_limit": -1,
"childtable_offset":0, "childtable_offset":0,
"interlace_rows": 32767, "interlace_rows": 32767,
...@@ -269,14 +295,14 @@ ...@@ -269,14 +295,14 @@
{ {
"name": "stb10", "name": "stb10",
"child_table_exists":"no", "child_table_exists":"no",
"childtable_count": 3, "childtable_count": 11,
"childtable_prefix": "stb10_", "childtable_prefix": "stb10_",
"auto_create_table": "no", "auto_create_table": "no",
"batch_create_tbl_num": 100, "batch_create_tbl_num": 100,
"data_source": "rand", "data_source": "rand",
"insert_mode": "sml", "insert_mode": "sml",
"line_protocol": "telnet" , "line_protocol": "telnet" ,
"insert_rows":50, "insert_rows":10,
"childtable_limit": -1, "childtable_limit": -1,
"childtable_offset":0, "childtable_offset":0,
"interlace_rows": 32767, "interlace_rows": 32767,
...@@ -295,14 +321,14 @@ ...@@ -295,14 +321,14 @@
{ {
"name": "stb11", "name": "stb11",
"child_table_exists":"no", "child_table_exists":"no",
"childtable_count": 3, "childtable_count": 12,
"childtable_prefix": "stb11_", "childtable_prefix": "stb11_",
"auto_create_table": "no", "auto_create_table": "no",
"batch_create_tbl_num": 100, "batch_create_tbl_num": 100,
"data_source": "rand", "data_source": "rand",
"insert_mode": "sml", "insert_mode": "sml",
"line_protocol": "telnet" , "line_protocol": "telnet" ,
"insert_rows":50, "insert_rows":10,
"childtable_limit": -1, "childtable_limit": -1,
"childtable_offset":0, "childtable_offset":0,
"interlace_rows": 32767, "interlace_rows": 32767,
...@@ -321,14 +347,14 @@ ...@@ -321,14 +347,14 @@
{ {
"name": "stb12", "name": "stb12",
"child_table_exists":"no", "child_table_exists":"no",
"childtable_count": 3, "childtable_count": 13,
"childtable_prefix": "stb12_", "childtable_prefix": "stb12_",
"auto_create_table": "no", "auto_create_table": "no",
"batch_create_tbl_num": 100, "batch_create_tbl_num": 100,
"data_source": "rand", "data_source": "rand",
"insert_mode": "sml", "insert_mode": "sml",
"line_protocol": "telnet" , "line_protocol": "telnet" ,
"insert_rows":50, "insert_rows":10,
"childtable_limit": -1, "childtable_limit": -1,
"childtable_offset":0, "childtable_offset":0,
"interlace_rows": 32767, "interlace_rows": 32767,
......
...@@ -97,15 +97,15 @@ class TDTestCase: ...@@ -97,15 +97,15 @@ class TDTestCase:
tdSql.checkData(0, 0, 4000) tdSql.checkData(0, 0, 4000)
# # insert-interface: sml-json # insert-interface: sml-json
# os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insert-sml-json-alltype.json -y " % binPath) os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insert-sml-json-alltype.json -y " % binPath)
# tdSql.execute("use db") tdSql.execute("use db")
# tdSql.query("show stables") tdSql.query("show stables")
# for i in range(13): for i in range(13):
# for j in range(13): for j in range(13):
# if tdSql.queryResult[i][0] == 'stb%d'%j: if tdSql.queryResult[i][0] == 'stb%d'%j:
# # print(i,"stb%d"%j) # print(i,"stb%d"%j)
# tdSql.checkData(i, 4, j+1) tdSql.checkData(i, 4, j+1)
# insert-interface: sml-telnet # insert-interface: sml-telnet
......
...@@ -95,18 +95,18 @@ class TDTestCase: ...@@ -95,18 +95,18 @@ class TDTestCase:
tdSql.query("select count(*) from `test.0`") tdSql.query("select count(*) from `test.0`")
tdSql.checkData(0, 0, 100) tdSql.checkData(0, 0, 100)
# tdLog.info("use diffrent interface rest") tdLog.info("use diffrent interface rest")
# tdSql.execute("drop database db1;") tdSql.execute("drop database db1;")
# # use different interface - rest # use different interface - rest
# os.system("%staosBenchmark -u root -c %s -h localhost -P 6030 -d db1 -a 1 -l 10 -b float,int,NCHAR\(15\) -w 4097 \ os.system("%staosBenchmark -u root -c %s -h localhost -P 6030 -d db1 -a 1 -l 10 -b float,int,NCHAR\(15\) -w 4097 \
# -T 8 -i 10 -S 1000 -r 1000000 -t 10 -n 100 -M -x -y -O 10 -R 100 -E -m test. -I rest " % (binPath,cfgPath)) -T 8 -i 10 -S 1000 -r 1000000 -t 10 -n 100 -M -x -y -O 10 -R 100 -E -m test. -I rest " % (binPath,cfgPath))
# tdSql.execute("use db1") tdSql.execute("use db1")
# tdSql.query("select count(*) from meters") tdSql.query("select count(*) from meters")
# tdSql.checkData(0, 0, 1000) tdSql.checkData(0, 0, 1000)
# tdSql.query("select count(tbname) from meters") tdSql.query("select count(tbname) from meters")
# tdSql.checkData(0, 0, 10) tdSql.checkData(0, 0, 10)
# tdSql.query("select count(*) from `test.0`") tdSql.query("select count(*) from `test.0`")
# tdSql.checkData(0, 0, 100) tdSql.checkData(0, 0, 100)
tdLog.info("use diffrent interface sml") tdLog.info("use diffrent interface sml")
tdSql.execute("drop database db1;") tdSql.execute("drop database db1;")
...@@ -121,7 +121,7 @@ class TDTestCase: ...@@ -121,7 +121,7 @@ class TDTestCase:
tdLog.info("all data type") tdLog.info("all data type")
tdSql.execute("drop database db1;") tdSql.execute("drop database db1;")
# all data type # all data type-taosc
os.system("%staosBenchmark -u root -c %s -h localhost -P 6030 -d db1 -a 1 -l 10 \ os.system("%staosBenchmark -u root -c %s -h localhost -P 6030 -d db1 -a 1 -l 10 \
-b INT,TIMESTAMP,BIGINT,FLOAT,DOUBLE,SMALLINT,TINYINT,BOOL,UINT,UBIGINT,UTINYINT,USMALLINT,BINARY\(15\),NCHAR\(15\) -w 4096 \ -b INT,TIMESTAMP,BIGINT,FLOAT,DOUBLE,SMALLINT,TINYINT,BOOL,UINT,UBIGINT,UTINYINT,USMALLINT,BINARY\(15\),NCHAR\(15\) -w 4096 \
-T 8 -i 10 -S 1000 -r 1000000 -t 10 -n 100 -M -x -y -O 10 -R 100 -E -m test. " % (binPath,cfgPath)) -T 8 -i 10 -S 1000 -r 1000000 -t 10 -n 100 -M -x -y -O 10 -R 100 -E -m test. " % (binPath,cfgPath))
...@@ -132,6 +132,43 @@ class TDTestCase: ...@@ -132,6 +132,43 @@ class TDTestCase:
tdSql.checkData(0, 0, 10) tdSql.checkData(0, 0, 10)
tdSql.query("select count(*) from `test.0`") tdSql.query("select count(*) from `test.0`")
tdSql.checkData(0, 0, 100) tdSql.checkData(0, 0, 100)
tdLog.info("all data type")
tdSql.execute("drop database db1;")
# all data type-stmt
os.system("%staosBenchmark -u root -c %s -h localhost -P 6030 -d db1 -a 1 -l 10 \
-b INT,TIMESTAMP,BIGINT,FLOAT,DOUBLE,SMALLINT,TINYINT,BOOL,UINT,UBIGINT,UTINYINT,USMALLINT,BINARY\(15\),NCHAR\(15\) -w 4096 \
-T 8 -i 10 -S 1000 -r 1000000 -t 10 -n 100 -M -x -y -O 10 -R 100 -E -m test. -I stmt " % (binPath,cfgPath))
tdSql.execute("use db1")
tdSql.query("select count(*) from meters")
tdSql.checkData(0, 0, 1000)
tdSql.query("select count(tbname) from meters")
tdSql.checkData(0, 0, 10)
tdSql.query("select count(*) from `test.0`")
tdSql.checkData(0, 0, 100)
# all data type-rest
os.system("%staosBenchmark -u root -c %s -h localhost -P 6030 -d db1 -a 1 -l 10 \
-b INT,TIMESTAMP,BIGINT,FLOAT,DOUBLE,SMALLINT,TINYINT,BOOL,UINT,UBIGINT,UTINYINT,USMALLINT,BINARY\(15\),NCHAR\(15\) -w 4096 \
-T 8 -i 10 -S 1000 -r 1000000 -t 10 -n 100 -M -x -y -O 10 -R 100 -E -m test. -I rest " % (binPath,cfgPath))
tdSql.execute("use db1")
tdSql.query("select count(*) from meters")
tdSql.checkData(0, 0, 1000)
tdSql.query("select count(tbname) from meters")
tdSql.checkData(0, 0, 10)
tdSql.query("select count(*) from `test.0`")
tdSql.checkData(0, 0, 100)
# # all data type-rest
# os.system("%staosBenchmark -u root -c %s -h localhost -P 6030 -d db1 -a 1 -l 10 \
# -b INT,BIGINT,FLOAT,DOUBLE,SMALLINT,TINYINT,BOOL,UINT,UBIGINT,UTINYINT,USMALLINT,BINARY\(15\),NCHAR\(15\) -w 4096 \
# -T 8 -i 10 -S 1000 -r 1000000 -t 10 -n 100 -M -x -y -O 10 -R 100 -E -m test. -I sml " % (binPath,cfgPath))
# tdSql.execute("use db1")
# tdSql.query("select count(*) from meters")
# tdSql.checkData(0, 0, 1000)
# tdSql.query("select count(tbname) from meters")
# tdSql.checkData(0, 0, 10)
# # tdSql.query("select count(*) from `test.0`")
# # tdSql.checkData(0, 0, 100)
tdLog.info("all data type and interlace rows") tdLog.info("all data type and interlace rows")
tdSql.execute("drop database db1;") tdSql.execute("drop database db1;")
...@@ -164,11 +201,18 @@ class TDTestCase: ...@@ -164,11 +201,18 @@ class TDTestCase:
tdSql.query("select count(*) from `test.0`") tdSql.query("select count(*) from `test.0`")
tdSql.checkData(0, 0, 100) tdSql.checkData(0, 0, 100)
# tdLog.info("%staosBenchmark -u root -c %s -h localhost -P 6030 -d db1 -a 1 -l 10 -b float,int,NCHAR\(4096\) \ # taosdemo error
# -w 40 -T 8 -i 10 -S 1000 -r 1000000 -t 10 -n 100 -M -x -y -O 10 -R 100 -E -m test. -I stmt" % (binPath,cfgPath)) # NCHAR length exceeds the maximum
# # taosdemo error-exceeds max length sql = "%staosBenchmark -u root -c %s -h localhost -P 6030 -d db1 -a 1 -l 10 -b float,int,NCHAR\(4096\) \
# assert os.system("%staosBenchmark -u root -c %s -h localhost -P 6030 -d db1 -a 1 -l 10 -b float,int,NCHAR\(4096\) \ -w 40 -T 8 -i 10 -S 1000 -r 1000000 -t 10 -n 100 -M -x -y -O 10 -R 100 -E -m test. -I taosc" % (binPath,cfgPath)
# -w 40 -T 8 -i 10 -S 1000 -r 1000000 -t 10 -n 100 -M -x -y -O 10 -R 100 -E -m test. -I taosc" % (binPath,cfgPath)) != 0 tdLog.info("%s" % sql )
assert os.system("%s" % sql ) != 0
# invalid password
sql = "%staosBenchmark -u root -c %s -h localhost -P 6030 -p123 -d db1 -a 1 -l 10 -b float,int,NCHAR\(40\) \
-w 40 -T 8 -i 10 -S 1000 -r 1000000 -t 10 -n 100 -M -x -y -O 10 -R 100 -E -m test. -I stmt" % (binPath,cfgPath)
tdLog.info("%s" % sql )
assert os.system("%s" % sql ) != 0
testcaseFilename = os.path.split(__file__)[-1] testcaseFilename = os.path.split(__file__)[-1]
os.system("rm -rf ./insert_res*.txt*") os.system("rm -rf ./insert_res*.txt*")
......
...@@ -74,22 +74,23 @@ class TDTestCase: ...@@ -74,22 +74,23 @@ class TDTestCase:
tdSql.query("select count(*) from stb1") tdSql.query("select count(*) from stb1")
tdSql.checkData(0, 0, 3000) tdSql.checkData(0, 0, 3000)
# # insert: use the parameter "insert_interval" to control the insert speed. # insert: use the parameter "insert_interval" to control the insert speed.
# # but we need accurate ways to control the speed, such as reading the speed value and checking the row count. # but we need accurate ways to control the speed, such as reading the speed value and checking the row count.
# os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insert-interval-speed-sml.json -y" % binPath) os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insert-interval-speed-sml.json -y" % binPath)
# tdSql.execute("use db") tdSql.execute("use db")
# tdSql.query("select tbname from db.stb0") # tdSql.query("select count (tbname) from stb0")
# tdSql.checkRows(100 ) tdSql.query("select tbname from db.stb0")
# # tdSql.query("select count(*) from stb00_0") tdSql.checkRows(100 )
# # tdSql.checkData(0, 0, 20) # tdSql.query("select count(*) from stb00_0")
# tdSql.query("select count(*) from stb0") # tdSql.checkData(0, 0, 20)
# tdSql.checkData(0, 0, 2000) tdSql.query("select count(*) from stb0")
# tdSql.query("show stables") tdSql.checkData(0, 0, 2000)
# tdSql.checkData(1, 4, 20) tdSql.query("show stables")
# # tdSql.query("select count(*) from stb01_0") tdSql.checkData(1, 4, 20)
# # tdSql.checkData(0, 0, 35) # tdSql.query("select count(*) from stb01_0")
# tdSql.query("select count(*) from stb1") # tdSql.checkData(0, 0, 35)
# tdSql.checkData(0, 0, 700) tdSql.query("select count(*) from stb1")
tdSql.checkData(0, 0, 700)
# spend 2min30s for 3 testcases. # spend 2min30s for 3 testcases.
# insert: drop and child_table_exists combination test # insert: drop and child_table_exists combination test
...@@ -134,10 +135,13 @@ class TDTestCase: ...@@ -134,10 +135,13 @@ class TDTestCase:
# os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insertColumnsAndTagNum4096-sml.json -y " % binPath) # os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insertColumnsAndTagNum4096-sml.json -y " % binPath)
# tdSql.query("select count(*) from db.stb0") # tdSql.query("select count(*) from db.stb0")
# tdSql.checkData(0, 0, 10000) # tdSql.checkData(0, 0, 10000)
# there is no longer a 4096-column limit, so this case is cancelled
# tdSql.execute("drop database if exists db") # tdSql.execute("drop database if exists db")
# os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insertInterlaceRowsLarge1M-sml.json -y " % binPath) # os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insertInterlaceRowsLarge1M-sml.json -y " % binPath)
# tdSql.query("select count(*) from db.stb0") # tdSql.query("select count(*) from db.stb0")
# tdSql.checkRows(0) # tdSql.checkRows(0)
tdSql.execute("drop database if exists db") tdSql.execute("drop database if exists db")
os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insertColumnsNum0-sml.json -y " % binPath) os.system("%staosBenchmark -f tools/taosdemoAllTest/sml/insertColumnsNum0-sml.json -y " % binPath)
tdSql.execute("use db") tdSql.execute("use db")
......
###################################################################
# Copyright (c) 2020 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import taos
import time
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def __init__(self):
self.err_case = 0
self.curret_case = 0
def caseDescription(self):
'''
case1 <cpwu>: [TD-11970] : creating tables with now±N time-unit offsets in tag values should succeed without error.
'''
return
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def check_td11970(self):
# this case expects every create-table statement with a now±N offset to succeed.
tdSql.prepare()
tdSql.execute(f"create stable stb1(ts timestamp, c1 int) tags (tag1 int, tag2 timestamp)")
try:
tdSql.execute(f"create table t1 using stb1 tags(1, now-100b)")
tdSql.execute(f"create table t2 using stb1 tags(2, now-100u)")
tdSql.execute(f"create table t3 using stb1 tags(3, now-100a)")
tdSql.execute(f"create table t4 using stb1 tags(4, now-100s)")
tdSql.execute(f"create table t5 using stb1 tags(5, now-100m)")
tdSql.execute(f"create table t6 using stb1 tags(6, now-100h)")
tdSql.execute(f"create table t7 using stb1 tags(7, now-100d)")
tdSql.execute(f"create table t8 using stb1 tags(8, now-100w)")
tdSql.execute(f"create table t9 using stb1 tags(9, now+10b)")
tdSql.execute(f"create table t10 using stb1 tags(10, now+10u)")
tdSql.execute(f"create table t11 using stb1 tags(11, now+10a)")
tdSql.execute(f"create table t12 using stb1 tags(12, now+10s)")
tdSql.execute(f"create table t13 using stb1 tags(13, now+10m)")
tdSql.execute(f"create table t14 using stb1 tags(14, now+10h)")
tdSql.execute(f"create table t15 using stb1 tags(15, now+10d)")
tdSql.execute(f"create table t16 using stb1 tags(16, now+10w)")
self.curret_case += 1
tdLog.printNoPrefix("the case for td-11970 run passed")
except:
self.err_case += 1
tdLog.printNoPrefix("the case for td-11970 run failed")
pass
def run(self):
self.check_td11970()
if self.err_case > 0:
tdLog.exit(f"{self.err_case} case run failed")
else:
tdLog.success("all case run passed")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2020 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import taos
import time
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def __init__(self):
self.err_case = 0
self.curret_case = 0
def caseDescription(self):
'''
case1 <cpwu>: [TD-11256] query the super table with a mix of expressions + tbname while using group by tbname
'''
return
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def create_stb(self):
basetime = int(round(time.time() * 1000))
tdSql.prepare()
tdSql.execute(f"create stable stb1(ts timestamp, c1 int) tags (tag1 int)")
for i in range(10):
tdSql.execute(f"create table t{i} using stb1 tags({i})")
tdSql.execute(f"insert into t{i} values ({basetime}, {i})")
pass
def check_td11256(self):
# this case expects the connection to remain usable after running the group-by sql
tdSql.query("select count(*) from stb1 group by ts")
try:
tdSql.error("select c1/2, tbname from stb1 group by tbname")
tdSql.query("show databases")
self.curret_case += 1
tdLog.printNoPrefix("the case1: td-11256 run passed")
except:
self.err_case += 1
tdLog.printNoPrefix("the case1: td-11256 run failed")
pass
def run(self):
self.create_stb()
self.check_td11256()
if self.err_case > 0:
tdLog.exit(f"{self.err_case} case for TD-11256 run failed")
else:
tdLog.success("case for TD-11256 run passed")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
...@@ -19,6 +19,7 @@ from util.log import * ...@@ -19,6 +19,7 @@ from util.log import *
from util.cases import * from util.cases import *
from util.sql import * from util.sql import *
from util.dnodes import * from util.dnodes import *
import subprocess
class TDTestCase: class TDTestCase:
def init(self, conn, logSql): def init(self, conn, logSql):
...@@ -48,10 +49,9 @@ class TDTestCase: ...@@ -48,10 +49,9 @@ class TDTestCase:
def caseDescription(self): def caseDescription(self):
''' '''
case1 <wenzhouwww>: [TD-11389] : case1 <wenzhouwww>: [TD-12344] :
this is a test case for a cache error , it makes the cached data obtained by a client connected to taosd incorrect, this is a test case for an unexpected crash of the session function , it core dumps the taos shell ;
root cause : the table schema is changed (the tag hostname size is increased through schemaless insertion), but the schema cache of the taos client is not refreshed.
''' '''
return return
...@@ -81,9 +81,6 @@ class TDTestCase: ...@@ -81,9 +81,6 @@ class TDTestCase:
cfgPath = projPath + "/sim/dnode1/cfg " cfgPath = projPath + "/sim/dnode1/cfg "
return cfgPath return cfgPath
def run(self): def run(self):
tdSql.prepare() tdSql.prepare()
...@@ -91,17 +88,17 @@ class TDTestCase: ...@@ -91,17 +88,17 @@ class TDTestCase:
tdSql.execute("use testdb;") tdSql.execute("use testdb;")
tdSql.execute("create stable st (ts timestamp , id int , value double) tags(hostname binary(10) ,ind int);") tdSql.execute("create stable st (ts timestamp , id int , value double) tags(hostname binary(10) ,ind int);")
for i in range(self.num): for i in range(self.num):
tdSql.execute("insert into sub_%s using st tags('host_%s' , %d) values (%d , %d , %f );"%(str(i),str(i),i*10,self.ts+100*i,i*2,i+10.00))
tdSql.execute("insert into sub_%s using st tags('host_%s' , %d) values (%d , %d , %f );"%(str(i),str(i),i*10,self.ts+200*i,i*2,i+10.00))
tdSql.execute("insert into sub_%s using st tags('host_%s' , %d) values (%d , %d , %f );"%(str(i),str(i),i*10,self.ts+300*i,i*2,i+10.00))
tdSql.execute("insert into sub_%s using st tags('host_%s' , %d) values (%d , %d , %f );"%(str(i),str(i),i*10,self.ts+10000*i,i*2,i+10.00)) tdSql.execute("insert into sub_%s using st tags('host_%s' , %d) values (%d , %d , %f );"%(str(i),str(i),i*10,self.ts+10000*i,i*2,i+10.00))
tdSql.query('select elapsed(ts,10s) from sub_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" session(ts,1d) ;')
cfg_path = self.getcfgPath() cfg_path = self.getcfgPath()
print(cfg_path) print(cfg_path)
# tdSql.execute('select elapsed(ts,10s) from st where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" session(ts,1d) group by tbname;') # session not support super table tdSql.execute('select elapsed(ts,10s) from testdb.st where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" session(ts,1d) group by tbname;') # session not support super table
os.system("taos -c %s -s 'select elapsed(ts,10s) from testdb.st where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" session(ts,1d) group by tbname;' " % (cfg_path)) taos_cmd1= "taos -c %s -s 'select elapsed(ts,10s) from testdb.st where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" session(ts,1d) group by tbname;' " % (cfg_path)
_ = subprocess.check_output(taos_cmd1, shell=True).decode("utf-8")
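# subprocess.check_output raises CalledProcessError on a non-zero exit status, so a coredump of the taos shell makes this case fail instead of passing silently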
def stop(self): def stop(self):
tdSql.close() tdSql.close()
......
#!/bin/bash
function stopTaosd {
echo "Stop taosd"
sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail'
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
}
stopTaosd
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
nohup taosd -c /etc/taos/ > /dev/null 2>&1 &
sleep 10
cd ../../
WKC=`pwd`
cd ${WKC}/src/connector/C#
dotnet test
dotnet run --project src/test/Cases/Cases.csproj
cd ${WKC}/tests/examples/C#
dotnet run --project C#checker/C#checker.csproj
dotnet run --project TDengineTest/TDengineTest.csproj
dotnet run --project schemaless/schemaless.csproj
cd ${WKC}/tests/examples/C#/taosdemo
dotnet build -c Release
tree | true
./bin/Release/net5.0/taosdemo -c /etc/taos -y
#!/bin/bash
function stopTaosd {
echo "Stop taosd"
sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail'
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
}
stopTaosd
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
nohup taosd -c /etc/taos/ > /dev/null 2>&1 &
sleep 10
cd ../../
WKC=`pwd`
#!/bin/bash
function stopTaosd {
echo "Stop taosd"
sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail'
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
}
stopTaosd
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
nohup taosd -c /etc/taos/ > /dev/null 2>&1 &
sleep 10
#!/bin/bash
function stopTaosd {
echo "Stop taosd"
sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail'
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
}
stopTaosd
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
nohup taosd -c /etc/taos/ > /dev/null 2>&1 &
sleep 10
cd ../../
WKC=`pwd`
cd ${WKC}/src/connector/nodejs
npm install
npm run test
cd ${WKC}/tests/examples/nodejs
npm install td2.0-connector > /dev/null 2>&1
node nodejsChecker.js host=localhost
node test1970.js
cd ${WKC}/tests/connectorTest/nodejsTest/nanosupport
npm install td2.0-connector > /dev/null 2>&1
node nanosecondTest.js
#!/bin/bash
function stopTaosd {
echo "Stop taosd"
sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail'
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
}
stopTaosd
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
nohup taosd -c /etc/taos/ > /dev/null 2>&1 &
sleep 10
cd ../../src/connector/python
pip3 install pytest
pytest tests/
python3 examples/bind-multi.py
python3 examples/bind-row.py
python3 examples/demo.py
python3 examples/insert-lines.py
python3 examples/pep-249.py
python3 examples/query-async.py
python3 examples/query-objectively.py
python3 examples/subscribe-sync.py
python3 examples/subscribe-async.py
#!/bin/bash
function stopTaosd {
echo "Stop taosd"
sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail'
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
}
stopTaosd
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
nohup taosd -c /etc/taos/ > /dev/null 2>&1 &
sleep 10
cd ../../
WKC=`pwd`
#!/bin/bash
function stopTaosd {
echo "Stop taosd"
sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail'
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
}
stopTaosd
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
nohup taosd -c /etc/taos/ > /dev/null 2>&1 &
sleep 10
cd ../../
WKC=`pwd`
\ No newline at end of file
###################################################################
# Copyright (c) 2020 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from taosdata
#
###################################################################
# -*- coding: utf-8 -*-
import taos
import time
import requests
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def __init__(self):
self.err_case = 0
self.curret_case = 0
self.url = "http://127.0.0.1:6041/rest/sql"
self.header = {'Authorization': 'Basic cm9vdDp0YW9zZGF0YQ=='}
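# 'cm9vdDp0YW9zZGF0YQ==' is base64("root:taosdata"), TDengine's default credentials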
def caseDescription(self):
'''
case1 <cpwu>: [TD-12163] alter table-schema using restful interface
'''
return
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def check_td12163(self):
# basetime = int(round(time.time() * 1000))
tdSql.prepare()
tdSql.execute(f"create stable stb1(ts timestamp, c1 int) tags (tag1 int)")
tdSql.execute(f"create table nt1 (nts timestamp, nc1 int)")
add_column_stb = "alter table db.stb1 add column c2 float"
drop_column_stb = "alter table db.stb1 drop column c2 "
add_column_ntb = "alter table db.nt1 add column nc2 float"
drop_column_ntb = "alter table db.nt1 drop column nc2 "
conn_add_stb = requests.post(url=self.url, headers=self.header, data=add_column_stb)
resp_code_stb_add = conn_add_stb.status_code
resp_add_stb = conn_add_stb.json()
try:
assert resp_code_stb_add//200 == 1
assert resp_add_stb["status"] == "succ"
self.curret_case += 1
tdLog.printNoPrefix("the case add column to stable successful")
except:
self.err_case += 1
tdLog.printNoPrefix("the case add column to stable failed")
conn_add_ntb = requests.post(url=self.url, headers=self.header, data=add_column_ntb)
resp_code_ntb_add = conn_add_ntb.status_code
resp_add_ntb = conn_add_ntb.json()
try:
assert resp_code_ntb_add//200 == 1
assert resp_add_ntb["status"] == "succ"
self.curret_case += 1
tdLog.printNoPrefix("the case add column to normal table successful")
except:
self.err_case += 1
tdLog.printNoPrefix("the case add column to normal table failed")
conn_drop_stb = requests.post(url=self.url, headers=self.header, data=drop_column_stb)
resp_code_stb_drop = conn_drop_stb.status_code
resp_drop_stb = conn_drop_stb.json()
try:
assert resp_code_stb_drop // 200 == 1
assert resp_drop_stb["status"] == "succ"
self.curret_case += 1
tdLog.printNoPrefix("the case drop column to stable successful")
except:
self.err_case += 1
tdLog.printNoPrefix("the case add column to stable failed")
conn_drop_ntb = requests.post(url=self.url, headers=self.header, data=drop_column_ntb)
resp_code_ntb_drop = conn_drop_ntb.status_code
resp_drop_ntb = conn_drop_ntb.json()
try:
assert resp_code_ntb_drop // 200 == 1
assert resp_drop_ntb["status"] == "succ"
self.curret_case += 1
tdLog.printNoPrefix("the case drop column to stable successful")
except:
self.err_case += 1
tdLog.printNoPrefix("the case add column to stable failed")
pass
def run(self):
self.check_td12163()
if self.err_case > 0:
tdLog.exit(f"{self.err_case} case for TD-12163 run failed")
else:
tdLog.success("case for TD-12163 run passed")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
bash 3-connectors/c#/test.sh
bash 3-connectors/go/test.sh
bash 3-connectors/java/test.sh
bash 3-connectors/nodejs/test.sh
bash 3-connectors/python/test.sh
bash 3-connectors/restful/test.sh
bash 3-connectors/rust/test.sh
#python3 ./test.py -f 1-insert/batchInsert.py
python3 ./test.py -f 1-insert/batchInsert.py
python3 test.py -f 1-insert/TD-11970.py
python3 ./test.py -f 0-others/create_col_tag.py python3 ./test.py -f 0-others/create_col_tag.py
\ No newline at end of file
python3 ./test.py -f 2-query/TD-11389.py python3 ./test.py -f 2-query/TD-11256.py
# python3 ./test.py -f 2-query/TD-11389.py
python3 ./test.py -f 2-query/TD-11945_crash.py python3 ./test.py -f 2-query/TD-11945_crash.py
python3 ./test.py -f 2-query/TD-12340-12342.py python3 ./test.py -f 2-query/TD-12340-12342.py
python3 ./test.py -f 2-query/TD-12344.py python3 ./test.py -f 2-query/TD-12344.py
python3 test.py -f 4-taosAdapter/TD-12163.py
python3 ./test.py -f 4-taosAdapter/taosAdapter_insert.py python3 ./test.py -f 4-taosAdapter/taosAdapter_insert.py
python3 ./test.py -f 4-taosAdapter/taosAdapter_query.py python3 ./test.py -f 4-taosAdapter/taosAdapter_query.py
\ No newline at end of file
#!/bin/bash
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
tests_dir=`pwd`
IN_TDINTERNAL="community"
function stopTaosd {
echo "Stop taosd"
sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail'
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
}
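# dohavecore: look for a freshly generated core file under $corepath; if one exists,
# archive the debug binaries and libraries, save the sim directory for later analysis,
# print a 10-frame backtrace with gdb, and exit 8 when $1 == 1 (abort-on-core mode)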
function dohavecore(){
corefile=`find $corepath -mmin 1`
if [ -n "$corefile" ];then
core_file=`echo $corefile|cut -d " " -f2`
proc=`file $core_file|awk -F "execfn:" '/execfn:/{print $2}'|tr -d \' |awk '{print $1}'|tr -d \,`
echo 'taosd or taos has generated core'
rm case.log
if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]] && [[ $1 == 1 ]]; then
cd ../../../
tar -zcPf $corepath'taos_'`date "+%Y_%m_%d_%H_%M_%S"`.tar.gz debug/build/bin/taosd debug/build/bin/tsim debug/build/lib/libtaos*so*
if [[ $2 == 1 ]];then
cp -r sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S"`
else
cd community
cp -r sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S" `
fi
else
cd ../../
if [[ $1 == 1 ]];then
tar -zcPf $corepath'taos_'`date "+%Y_%m_%d_%H_%M_%S"`.tar.gz debug/build/bin/taosd debug/build/bin/tsim debug/build/lib/libtaos*so*
cp -r sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S" `
fi
fi
if [[ $1 == 1 ]];then
echo '\n'|gdb $proc $core_file -ex "bt 10" -ex quit
exit 8
fi
fi
}
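# runPyCaseOneByOne: run every "python ..." line of the case list file $1 sequentially,
# logging each case's result and execution time to pytest-out.log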
function runPyCaseOneByOne {
while read -r line; do
if [[ $line =~ ^python.* ]]; then
if [[ $line != *sleep* ]]; then
if [[ $line =~ '-r' ]];then
case=`echo $line|awk '{print $4}'`
else
case=`echo $line|awk '{print $NF}'`
fi
start_time=`date +%s`
date +%F\ %T | tee -a pytest-out.log
echo -n $case
$line > /dev/null 2>&1 && \
echo -e "${GREEN} success${NC}" | tee -a pytest-out.log || \
echo -e "${RED} failed${NC}" | tee -a pytest-out.log
end_time=`date +%s`
out_log=`tail -1 pytest-out.log `
# if [[ $out_log =~ 'failed' ]];then
# exit 8
# fi
echo execution time of $case was `expr $end_time - $start_time`s. | tee -a pytest-out.log
else
$line > /dev/null 2>&1
fi
fi
done < $1
}
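# runPyCaseOneByOnefq: sharded runner; executes only the lines of $1 whose line number
# satisfies (i % $2) == $4, so several CI workers can split one case list.
# $3 == 1 makes a failure or a core dump abort the whole run with exit code 8.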
function runPyCaseOneByOnefq() {
end=`sed -n '$=' $1`
for ((i=1;i<=$end;i++)) ; do
if [[ $(($i%$2)) -eq $4 ]];then
line=`sed -n "$i"p $1`
if [[ $line =~ ^python.* ]]; then
if [[ $line != *sleep* ]]; then
if [[ $line =~ '-r' ]];then
case=`echo $line|awk '{print $4}'`
else
case=`echo $line|awk '{print $NF}'`
fi
start_time=`date +%s`
date +%F\ %T | tee -a pytest-out.log
echo -n $case
if [[ $1 =~ full ]] ; then
line=$line" -s"
fi
$line > case.log 2>&1 && \
echo -e "${GREEN} success${NC}" | tee -a pytest-out.log || \
echo -e "${RED} failed${NC}" | tee -a pytest-out.log
end_time=`date +%s`
out_log=`tail -1 pytest-out.log `
if [[ $out_log =~ 'failed' ]];then
cp -r ../../sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S" `
echo '=====================log===================== '
cat case.log
rm -rf case.log
dohavecore $3 2
if [[ $3 == 1 ]];then
exit 8
fi
fi
echo execution time of $case was `expr $end_time - $start_time`s. | tee -a pytest-out.log
else
$line > /dev/null 2>&1
fi
dohavecore $3 2
else
echo $line
if [[ $line =~ ^bash.* ]]; then
# $line > case.log 2>&1 || cat case.log && exit 8
# cat case.log
$line > case.log 2>&1
if [ $? -ne 0 ];then
cat case.log
exit 8
fi
fi
fi
fi
done
rm -rf ../../sim/case.log
}
######################
# main entry
######################
unameOut="$(uname -s)"
case "${unameOut}" in
Linux*) OS=Linux;;
Darwin*) OS=Darwin;;
CYGWIN*) OS=Windows;;
*) OS=Unknown;;
esac
case "${OS}" in
Linux*) TAOSLIB=libtaos.so;;
Darwin*) TAOSLIB=libtaos.dylib;;
Windows*) TAOSLIB=taos.dll;;
Unknown) TAOSLIB="UNKNOWN:${unameOut}";;
esac
echo TAOSLIB is ${TAOSLIB}
totalFailed=0
totalPyFailed=0
totalJDBCFailed=0
totalUnitFailed=0
totalExampleFailed=0
totalApiFailed=0
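# on Linux, derive the core-dump directory from the kernel core_pattern; if none is
# configured, point core_pattern at /coredump/ (requires root) and use that path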
if [ "${OS}" == "Linux" ]; then
corepath=`grep -oP '.*(?=core_)' /proc/sys/kernel/core_pattern||grep -oP '.*(?=core-)' /proc/sys/kernel/core_pattern`
if [ -z "$corepath" ];then
echo "/coredump/core_%e_%p_%t" > /proc/sys/kernel/core_pattern || echo "Permission denied"
corepath="/coredump/"
fi
fi
echo "### run Python test case ###"
cd $tests_dir
if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]]; then
cd ../..
else
cd ../
fi
TOP_DIR=`pwd`
TAOSLIB_DIR=`find . -name "${TAOSLIB}"|grep -w lib|head -n1`
if [[ "$TAOSLIB_DIR" == *"$IN_TDINTERNAL"* ]]; then
LIB_DIR=`find . -name "${TAOSLIB}"|grep -w lib|head -n1|cut -d '/' -f 2,3,4,5`
else
LIB_DIR=`find . -name "${TAOSLIB}"|grep -w lib|head -n1|cut -d '/' -f 2,3,4`
fi
export LD_LIBRARY_PATH=$TOP_DIR/$LIB_DIR:$LD_LIBRARY_PATH
cd $tests_dir/pytest
[ -f pytest-out.log ] && rm -f pytest-out.log
if [ "$1" == "full" ]; then
echo "### run Python full test ###"
runPyCaseOneByOne fulltest-tools.sh
runPyCaseOneByOne fulltest-query.sh
runPyCaseOneByOne fulltest-other.sh
runPyCaseOneByOne fulltest-insert.sh
runPyCaseOneByOne fulltest-connector.sh
else
echo "### run $1 $2 test ###"
if [ "$1" != "query" ] && [ "$1" != "taosAdapter" ] && [ "$1" != "other" ] && [ "$1" != "tools" ] && [ "$1" != "insert" ] && [ "$1" != "connector" ] ;then
echo " wrong option:$1 must one of [query,other,tools,insert,connector,taosAdapter]"
exit 8
fi
cd $tests_dir/pytest
runPyCaseOneByOnefq fulltest-$1.sh $2 1 $3
cd $tests_dir/develop-test
runPyCaseOneByOnefq fulltest-$1.sh $2 1 $3
cd $tests_dir/system-test
runPyCaseOneByOnefq fulltest-$1.sh $2 1 $3
fi