taosdata / TDengine
Commit cf4f0d95
Authored on Dec 14, 2020 by sangshuduo

Merge branch 'develop' into docs/sangshuduo/update-doc-align-with-1.0.5

Parents: e15f456d, 234fd54e
Showing 135 changed files with 5031 additions and 744 deletions (+5031, -744).
Jenkinsfile: +73 -234
packaging/cfg/taos.cfg: +8 -4
src/client/src/tscAsync.c: +2 -5
src/client/src/tscFunctionImpl.c: +77 -68
src/client/src/tscParseInsert.c: +8 -7
src/client/src/tscSQLParser.c: +22 -14
src/client/src/tscServer.c: +11 -4
src/client/src/tscSubquery.c: +15 -5
src/client/src/tscUtil.c: +4 -0
src/common/inc/tglobal.h: +2 -2
src/common/src/tglobal.c: +10 -10
src/cq/src/cqMain.c: +1 -1
src/dnode/src/dnodeEps.c: +1 -1
src/dnode/src/dnodeMRead.c: +0 -2
src/dnode/src/dnodeMWrite.c: +0 -2
src/dnode/src/dnodeShell.c: +15 -3
src/dnode/src/dnodeSystem.c: +19 -0
src/dnode/src/dnodeVMgmt.c: +2 -2
src/dnode/src/dnodeVRead.c: +8 -6
src/dnode/src/dnodeVWrite.c: +0 -1
src/inc/taoserror.h: +3 -2
src/inc/taosmsg.h: +4 -0
src/kit/shell/inc/shell.h: +1 -0
src/kit/shell/src/shellEngine.c: +45 -6
src/kit/shell/src/shellLinux.c: +4 -0
src/kit/shell/src/shellMain.c: +15 -0
src/kit/shell/src/shellWindows.c: +4 -0
src/kit/taosdump/taosdump.c: +205 -98
src/mnode/inc/mnodeMnode.h: +2 -2
src/mnode/src/mnodeDnode.c: +1 -1
src/mnode/src/mnodeMnode.c: +4 -4
src/mnode/src/mnodePeer.c: +1 -1
src/mnode/src/mnodeProfile.c: +14 -7
src/mnode/src/mnodeRead.c: +1 -1
src/mnode/src/mnodeShow.c: +2 -2
src/mnode/src/mnodeVgroup.c: +2 -1
src/mnode/src/mnodeWrite.c: +1 -1
src/query/inc/tsqlfunction.h: +1 -2
src/query/src/qExecutor.c: +63 -63
src/query/src/qFilterfunc.c: +45 -28
src/rpc/src/rpcMain.c: +16 -8
src/rpc/src/rpcTcp.c: +8 -1
src/rpc/test/rclient.c: +2 -1
src/sync/inc/syncInt.h: +5 -1
src/sync/src/syncMain.c: +16 -11
src/sync/src/syncRestore.c: +14 -10
src/sync/src/syncRetrieve.c: +22 -14
src/util/inc/tconfig.h: +1 -0
src/util/src/tconfig.c: +54 -0
src/util/src/ttimer.c: +3 -2
src/vnode/inc/vnodeInt.h: +1 -0
src/vnode/src/vnodeMain.c: +4 -3
src/vnode/src/vnodeRead.c: +9 -9
src/vnode/src/vnodeWrite.c: +7 -3
src/wal/inc/walInt.h: +1 -1
src/wal/src/walWrite.c: +1 -3
tests/Jenkinsfile: +174 -45
tests/examples/JDBC/taosdemo/.gitignore: +33 -0
tests/examples/JDBC/taosdemo/.mvn/wrapper/MavenWrapperDownloader.java: +118 -0
tests/examples/JDBC/taosdemo/.mvn/wrapper/maven-wrapper.jar: +0 -0
tests/examples/JDBC/taosdemo/.mvn/wrapper/maven-wrapper.properties: +2 -0
tests/examples/JDBC/taosdemo/mvnw: +322 -0
tests/examples/JDBC/taosdemo/mvnw.cmd: +182 -0
tests/examples/JDBC/taosdemo/pom.xml: +117 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosdemoApplication.java: +15 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/components/TaosDemoCommandLineRunner.java: +174 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/DatabaseController.java: +40 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/InsertController.java: +17 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/SubTableController.java: +45 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/SuperTableController.java: +26 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/TableController.java: +11 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/FieldMeta.java: +17 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/FieldValue.java: +17 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/RowValue.java: +15 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/SubTableMeta.java: +15 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/SubTableValue.java: +15 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/SuperTableMeta.java: +14 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/TableMeta.java: +13 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/TableValue.java: +15 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/TagMeta.java: +18 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/TagValue.java: +17 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/DatabaseMapper.java: +27 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/DatabaseMapper.xml: +48 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SubTableMapper.java: +30 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SubTableMapper.xml: +81 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SuperTableMapper.java: +33 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SuperTableMapper.xml: +41 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/TableMapper.java: +28 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/TableMapper.xml: +68 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/AbstractService.java: +35 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/DatabaseService.java: +38 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SubTableService.java: +118 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SuperTableService.java: +22 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/TableService.java: +42 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/FieldValueGenerator.java: +48 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SubTableMetaGenerator.java: +30 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SubTableValueGenerator.java: +84 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SuperTableMetaGenerator.java: +80 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/TagValueGenerator.java: +24 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/DataGenerator.java: +120 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/JdbcTaosdemoConfig.java: +204 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/TaosConstants.java: +8 -0
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/TimeStampUtil.java: +67 -0
tests/examples/JDBC/taosdemo/src/main/resources/application.properties: +14 -0
tests/examples/JDBC/taosdemo/src/main/resources/log4j.properties: +21 -0
tests/examples/JDBC/taosdemo/src/main/resources/templates/index.html: +10 -0
tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/TaosdemoApplicationTests.java: +13 -0
tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/DatabaseMapperTest.java: +42 -0
tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/SubTableMapperTest.java: +88 -0
tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/SuperTableMapperTest.java: +50 -0
tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/TableMapperTest.java: +142 -0
tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/DatabaseServiceTest.java: +29 -0
tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/SubTableServiceTest.java: +50 -0
tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/SuperTableServiceTest.java: +39 -0
tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/TableServiceTest.java: +43 -0
tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/data/FieldValueGeneratorTest.java: +59 -0
tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/data/SubTableMetaGeneratorTest.java: +52 -0
tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/data/SuperTableMetaGeneratorImplTest.java: +60 -0
tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/data/TagValueGeneratorTest.java: +37 -0
tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/utils/DataGeneratorTest.java: +20 -0
tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/utils/TimeStampUtilTest.java: +38 -0
tests/pytest/concurrent_inquiry.py: +98 -17
tests/pytest/fulltest.sh: +4 -2
tests/pytest/functions/function_twa_test2.py: +124 -0
tests/pytest/pytest_1.sh: +2 -1
tests/pytest/query/isNullTest.py: +128 -0
tests/pytest/query/queryNullValueTest.py: +2 -2
tests/pytest/query/queryWithTaosdKilled.py: +68 -0
tests/pytest/stream/stream2.py: +10 -0
tests/pytest/update/append_commit_data.py: +35 -11
tests/pytest/util/dnodes.py: +13 -3
tests/script/general/parser/function.sim: +22 -4
tests/script/general/parser/tags_filter.sim: +53 -0
tests/script/general/parser/testSuite.sim: +2 -0
tests/script/unique/arbitrator/insert_duplicationTs.sim: +95 -2
Jenkinsfile
pipeline {
  agent none
  environment {
    WK = '/var/lib/jenkins/workspace/TDinternal'
    WKC = '/var/lib/jenkins/workspace/TDinternal/community'
  }
  stages {
    stage('Parallel test stage') {
      parallel {
        stage('pytest') {
          agent { label '184' }
          steps {
            properties([pipelineTriggers([githubPush()])])
            node {
              git url: 'https://github.com/taosdata/TDengine'
            }
            // execute this before anything else, including requesting any time on an agent
            if (currentBuild.rawBuild.getCauses().toString().contains('BranchIndexingCause')) {
              print "INFO: Build skipped due to trigger being Branch Indexing"
              currentBuild.result = 'ABORTED' // optional, gives a better hint to the user that it's been skipped, rather than the default which shows it's successful
              return
            }
def pre_test() {
  catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
    sh '''
    sudo rmtaos
    '''
  }
  sh '''
  date
  cd ${WKC}
  git reset --hard
  git checkout develop
  git pull
  git submodule update
  rm -rf *
  cd ${WK}
  git reset --hard
  git checkout develop
  git pull
  cd ${WKC}
  rm -rf *
  mv ${WORKSPACE}/* .
  cd ${WK}
  export TZ=Asia/Harbin
  date
  rm -rf ${WK}/debug
...
@@ -31,62 +39,44 @@ pipeline {
  make > /dev/null
  make install > /dev/null
  cd ${WKC}/tests
  #./test-all.sh smoke
  ./test-all.sh pytest
  '''
  return 1
}
pipeline {
  agent none
  environment {
    WK = '/var/lib/jenkins/workspace/TDinternal'
    WKC = '/var/lib/jenkins/workspace/TDinternal/community'
  }
  stages {
    stage('Parallel test stage') {
      parallel {
        stage('python p1') {
          agent { label 'p1' }
          steps {
            pre_test()
            sh '''
            cd ${WKC}/tests
            ./test-all.sh p1
            date'''
          }
        }
        stage('test_b1') {
          agent { label 'master' }
          agent { label 'b1' }
          steps {
            pre_test()
            sh '''
            cd ${WKC}
            git reset --hard
            git checkout develop
            git pull
            git submodule update
            cd ${WK}
            git reset --hard
            git checkout develop
            git pull
            export TZ=Asia/Harbin
            date
            rm -rf ${WK}/debug
            mkdir debug
            cd debug
            cmake .. > /dev/null
            make > /dev/null
            cd ${WKC}/tests
            #./test-all.sh smoke
            ./test-all.sh b1
            date'''
          }
        }
        stage('test_crash_gen') {
          agent { label "185" }
          agent { label "b2" }
          steps {
            sh '''
            cd ${WKC}
            git reset --hard
            git checkout develop
            git pull
            git submodule update
            cd ${WK}
            git reset --hard
            git checkout develop
            git pull
            export TZ=Asia/Harbin
            rm -rf ${WK}/debug
            mkdir debug
            cd debug
            cmake .. > /dev/null
            make > /dev/null
            cd ${WKC}/tests/pytest
            '''
            pre_test()
            catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
              sh '''
              cd ${WKC}/tests/pytest
...
@@ -109,193 +99,42 @@ pipeline {
        }
        stage('test_valgrind') {
          agent { label "186" }
          agent { label "b3" }
          steps {
            pre_test()
            catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
              sh '''
              cd ${WKC}
              git reset --hard
              git checkout develop
              git pull
              git submodule update
              cd ${WK}
              git reset --hard
              git checkout develop
              git pull
              export TZ=Asia/Harbin
              date
              rm -rf ${WK}/debug
              mkdir debug
              cd debug
              cmake .. > /dev/null
              make > /dev/null
              cd ${WKC}/tests/pytest
              ./valgrind-test.sh 2>&1 > mem-error-out.log
              ./handle_val_log.sh
              '''
            }
            sh '''
            date
            cd ${WKC}/tests
            ./test-all.sh b3
            date'''
          }
        }
        stage('connector') {
          agent { label "release" }
        stage('python p2') {
          agent { label "p2" }
          steps {
            sh '''
            cd ${WORKSPACE}
            git checkout develop
            '''
            catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
              sh '''
              cd ${WORKSPACE}/tests/gotest
              bash batchtest.sh
              '''
            }
            catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
              sh '''
              cd ${WORKSPACE}/tests/examples/python/PYTHONConnectorChecker
              python3 PythonChecker.py
              '''
            }
            catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
              sh '''
              cd ${WORKSPACE}/tests/examples/JDBC/JDBCDemo/
              mvn clean package assembly:single >/dev/null
              java -jar target/jdbcChecker-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
              '''
            }
            catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
              pre_test()
              sh '''
              cd ${JENKINS_HOME}/workspace/C#NET/src/CheckC#
              dotnet run
              date
              cd ${WKC}/tests
              ./test-all.sh p2
              date
              '''
            }
          }
        }
        stage('arm64_build') {
          agent { label 'arm64' }
          steps {
            sh '''
            cd ${WK}
            git fetch
            git checkout develop
            git pull
            cd ${WKC}
            git fetch
            git checkout develop
            git pull
            git submodule update
            cd ${WKC}/packaging
            ./release.sh -v cluster -c aarch64 -n 2.0.0.0 -m 2.0.0.0
            '''
          }
        }
        stage('arm32_build') {
          agent { label 'arm32' }
          steps {
            catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
              sh '''
              cd ${WK}
              git fetch
              git checkout develop
              git pull
              cd ${WKC}
              git fetch
              git checkout develop
              git pull
              git submodule update
              cd ${WKC}/packaging
              ./release.sh -v cluster -c aarch32 -n 2.0.0.0 -m 2.0.0.0
              '''
            }
          }
        }
      }
    }
  }
  post {
    success {
      emailext (
        subject: "SUCCESSFUL: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
        body: '''<!DOCTYPE html>
        <html>
        <head>
        <meta charset="UTF-8">
        </head>
        <body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
          <table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
            <tr>
              <td><br />
                <b><font color="#0B610B"><font size="6">构建信息</font></font></b>
                <hr size="2" width="100%" align="center" /></td>
            </tr>
            <tr>
              <td>
                <ul>
                  <div style="font-size:18px">
                    <li>构建名称>>分支:${PROJECT_NAME}</li>
                    <li>构建结果:<span style="color:green"> Successful </span></li>
                    <li>构建编号:${BUILD_NUMBER}</li>
                    <li>触发用户:${CAUSE}</li>
                    <li>变更概要:${CHANGES}</li>
                    <li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
                    <li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
                    <li>变更集:${JELLY_SCRIPT}</li>
                  </div>
                </ul>
              </td>
            </tr>
          </table></font>
        </body>
        </html>''',
        to: "yqliu@taosdata.com,pxiao@taosdata.com",
        from: "support@taosdata.com"
      )
    }
    failure {
      emailext (
        subject: "FAILED: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
        body: '''<!DOCTYPE html>
        <html>
        <head>
        <meta charset="UTF-8">
        </head>
        <body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
          <table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
            <tr>
              <td><br />
                <b><font color="#0B610B"><font size="6">构建信息</font></font></b>
                <hr size="2" width="100%" align="center" /></td>
            </tr>
            <tr>
              <td>
                <ul>
                  <div style="font-size:18px">
                    <li>构建名称>>分支:${PROJECT_NAME}</li>
                    <li>构建结果:<span style="color:green"> Successful </span></li>
                    <li>构建编号:${BUILD_NUMBER}</li>
                    <li>触发用户:${CAUSE}</li>
                    <li>变更概要:${CHANGES}</li>
                    <li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
                    <li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
                    <li>变更集:${JELLY_SCRIPT}</li>
                  </div>
                </ul>
              </td>
            </tr>
          </table></font>
        </body>
        </html>''',
        to: "yqliu@taosdata.com,pxiao@taosdata.com",
        from: "support@taosdata.com"
      )
    }
  }
}
packaging/cfg/taos.cfg
...
@@ -29,8 +29,12 @@
# number of threads per CPU core
# numOfThreadsPerCore    1.0
# the proportion of total threads responsible for query
# ratioOfQueryThreads    0.5
# the proportion of total CPU cores available for query processing
# 2.0: the query threads will be set to double of the CPU cores.
# 1.0: all CPU cores are available for query processing [default].
# 0.5: only half of the CPU cores are available for query.
# 0.0: only one core available.
# tsRatioOfQueryCores    1.0
# number of management nodes in the system
# numOfMnodes            3
...
@@ -265,5 +269,5 @@
# enable/disable stream (continuous query)
# stream                 1
# only 50% CPU resources will be used in query processing
# halfCoresForQuery      0
# in retrieve blocking model, only in 50% query threads will be used in query processing in dnode
# retrieveBlockingModel  0
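For orientation only (not part of the commit): per the comment block above, and the dnodeVRead.c hunk later in this diff that sizes the vnode query pool as MAX(tsNumOfCores * tsRatioOfQueryCores, 1), the new ratio maps CPU cores to query threads. A minimal sketch of a taos.cfg fragment, assuming a hypothetical 16-core dnode and the option name "ratioOfQueryCores" as registered in tglobal.c below (the cfg comment spells the variable tsRatioOfQueryCores):

# ratioOfQueryCores  2.0    -> ~32 query threads on 16 cores
# ratioOfQueryCores  1.0    -> 16 query threads (default)
# ratioOfQueryCores  0.5    -> 8 query threads
# ratioOfQueryCores  0.0    -> 1 query thread
ratioOfQueryCores  0.5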
src/client/src/tscAsync.c
...
@@ -365,6 +365,7 @@ void tscProcessFetchRow(SSchedMsg *pMsg) {
static void tscProcessAsyncError(SSchedMsg *pMsg) {
  void (*fp)() = pMsg->ahandle;
  terrno = *(int32_t*)pMsg->msg;
  tfree(pMsg->msg);
  (*fp)(pMsg->thandle, NULL, *(int32_t*)pMsg->msg);
}
...
@@ -447,9 +448,6 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
  assert(pCmd->command != TSDB_SQL_INSERT);
  // in case of insert, redo parsing the sql string and build new submit data block for two reasons:
  // 1. the table Id(tid & uid) may have been update, the submit block needs to be updated accordingly.
  // 2. vnode may need the schema information along with submit block to update its local table schema.
  if (pCmd->command == TSDB_SQL_SELECT) {
    tscDebug("%p redo parse sql string and proceed", pSql);
    pCmd->parseFinished = false;
...
@@ -463,8 +461,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
    }
    tscProcessSql(pSql);
  } else { // in all other cases, simple retry
  } else {
    // in all other cases, simple retry
    tscProcessSql(pSql);
  }
...
src/client/src/tscFunctionImpl.c
...
@@ -3648,11 +3648,21 @@ static bool twa_function_setup(SQLFunctionCtx *pCtx) {
  SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
  STwaInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo);
  pInfo->lastKey = INT64_MIN;
  pInfo->p.key = INT64_MIN;
  pInfo->win = TSWINDOW_INITIALIZER;
  return true;
}
static double twa_get_area(SPoint1 s, SPoint1 e) {
  if ((s.val >= 0 && e.val >= 0) || (s.val <= 0 && e.val <= 0)) {
    return (s.val + e.val) * (e.key - s.key) / 2;
  }
  double x = (s.key * e.val - e.key * s.val) / (e.val - s.val);
  double val = (s.val * (x - s.key) + e.val * (e.key - x)) / 2;
  return val;
}
static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t tsIndex, int32_t index, int32_t size) {
  int32_t notNullElems = 0;
  TSKEY *primaryKey = pCtx->ptsList;
...
@@ -3663,28 +3673,29 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t tsIndex, int32_t
  int32_t i = index;
  int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order);
  SPoint1 *last = &pInfo->p;
  if (pCtx->start.key != INT64_MIN) {
    assert((pCtx->start.key < primaryKey[tsIndex + i] && pCtx->order == TSDB_ORDER_ASC) ||
           (pCtx->start.key > primaryKey[tsIndex + i] && pCtx->order == TSDB_ORDER_DESC));
    assert(pInfo->lastKey == INT64_MIN);
    assert(last->key == INT64_MIN);
    pInfo->lastKey = primaryKey[tsIndex + i];
    GET_TYPED_DATA(pInfo->lastValue, double, pCtx->inputType, GET_INPUT_CHAR_INDEX(pCtx, index));
    last->key = primaryKey[tsIndex + i];
    GET_TYPED_DATA(last->val, double, pCtx->inputType, GET_INPUT_CHAR_INDEX(pCtx, index));
    pInfo->dOutput += ((pInfo->lastValue + pCtx->start.val) / 2) * (pInfo->lastKey - pCtx->start.key);
    pInfo->dOutput += twa_get_area(pCtx->start, *last);
    pInfo->hasResult = DATA_SET_FLAG;
    pInfo->win.skey = pCtx->start.key;
    notNullElems++;
    i += step;
  } else if (pInfo->lastKey == INT64_MIN) {
    pInfo->lastKey = primaryKey[tsIndex + i];
    GET_TYPED_DATA(pInfo->lastValue, double, pCtx->inputType, GET_INPUT_CHAR_INDEX(pCtx, index));
  } else if (pInfo->p.key == INT64_MIN) {
    last->key = primaryKey[tsIndex + i];
    GET_TYPED_DATA(last->val, double, pCtx->inputType, GET_INPUT_CHAR_INDEX(pCtx, index));
    pInfo->hasResult = DATA_SET_FLAG;
    pInfo->win.skey = pInfo->lastKey;
    pInfo->win.skey = last->key;
    notNullElems++;
    i += step;
  }
...
@@ -3698,9 +3709,9 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t tsIndex, int32_t
        continue;
      }
      pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + tsIndex] - pInfo->lastKey);
      pInfo->lastValue = val[i];
      pInfo->lastKey = primaryKey[i + tsIndex];
      SPoint1 st = {.key = primaryKey[i + tsIndex], .val = val[i]};
      pInfo->dOutput += twa_get_area(pInfo->p, st);
      pInfo->p = st;
    }
    break;
  }
...
@@ -3711,9 +3722,9 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t tsIndex, int32_t
        continue;
      }
      pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + tsIndex] - pInfo->lastKey);
      pInfo->lastValue = val[i];
      pInfo->lastKey = primaryKey[i + tsIndex];
      SPoint1 st = {.key = primaryKey[i + tsIndex], .val = val[i]};
      pInfo->dOutput += twa_get_area(pInfo->p, st);
      pInfo->p = st;
    }
    break;
  }
...
@@ -3724,9 +3735,9 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t tsIndex, int32_t
        continue;
      }
      pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + tsIndex] - pInfo->lastKey);
      pInfo->lastValue = val[i];
      pInfo->lastKey = primaryKey[i + tsIndex];
      SPoint1 st = {.key = primaryKey[i + tsIndex], .val = val[i]};
      pInfo->dOutput += twa_get_area(pInfo->p, st);
      pInfo->p = st;
    }
    break;
  }
...
@@ -3737,9 +3748,9 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t tsIndex, int32_t
        continue;
      }
      pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + tsIndex] - pInfo->lastKey);
      pInfo->lastValue = (double) val[i];
      pInfo->lastKey = primaryKey[i + tsIndex];
      SPoint1 st = {.key = primaryKey[i + tsIndex], .val = (double) val[i]};
      pInfo->dOutput += twa_get_area(pInfo->p, st);
      pInfo->p = st;
    }
    break;
  }
...
@@ -3750,9 +3761,9 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t tsIndex, int32_t
        continue;
      }
      pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + tsIndex] - pInfo->lastKey);
      pInfo->lastValue = val[i];
      pInfo->lastKey = primaryKey[i + tsIndex];
      SPoint1 st = {.key = primaryKey[i + tsIndex], .val = val[i]};
      pInfo->dOutput += twa_get_area(pInfo->p, st);
      pInfo->p = st;
    }
    break;
  }
...
@@ -3763,9 +3774,9 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t tsIndex, int32_t
        continue;
      }
      pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + tsIndex] - pInfo->lastKey);
      pInfo->lastValue = val[i];
      pInfo->lastKey = primaryKey[i + tsIndex];
      SPoint1 st = {.key = primaryKey[i + tsIndex], .val = val[i]};
      pInfo->dOutput += twa_get_area(pInfo->p, st);
      pInfo->p = st;
    }
    break;
  }
...
@@ -3774,20 +3785,19 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t tsIndex, int32_t
  // the last interpolated time window value
  if (pCtx->end.key != INT64_MIN) {
    pInfo->dOutput += ((pInfo->lastValue + pCtx->end.val) / 2) * (pCtx->end.key - pInfo->lastKey);
    pInfo->lastValue = pCtx->end.val;
    pInfo->lastKey = pCtx->end.key;
    pInfo->dOutput += twa_get_area(pInfo->p, pCtx->end);
    pInfo->p = pCtx->end;
  }
  pInfo->win.ekey = pInfo->lastKey;
  pInfo->win.ekey = pInfo->p.key;
  return notNullElems;
}
static void twa_function(SQLFunctionCtx *pCtx) {
  void *data = GET_INPUT_CHAR(pCtx);
  void *data = GET_INPUT_CHAR(pCtx);
  SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
  STwaInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo);
  STwaInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo);
  // skip null value
  int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order);
...
@@ -3808,6 +3818,7 @@ static void twa_function(SQLFunctionCtx *pCtx) {
  }
}
//TODO refactor
static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) {
  void *pData = GET_INPUT_CHAR_INDEX(pCtx, index);
  if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
...
@@ -3824,23 +3835,23 @@ static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) {
  int32_t size = pCtx->size;
  if (pCtx->start.key != INT64_MIN) {
    assert(pInfo->lastKey == INT64_MIN);
    assert(pInfo->p.key == INT64_MIN);
    pInfo->lastKey = primaryKey[index];
    GET_TYPED_DATA(pInfo->lastValue, double, pCtx->inputType, GET_INPUT_CHAR_INDEX(pCtx, index));
    pInfo->p.key = primaryKey[index];
    GET_TYPED_DATA(pInfo->p.val, double, pCtx->inputType, GET_INPUT_CHAR_INDEX(pCtx, index));
    pInfo->dOutput += ((pInfo->lastValue + pCtx->start.val) / 2) * (pInfo->lastKey - pCtx->start.key);
    pInfo->dOutput += twa_get_area(pCtx->start, pInfo->p);
    pInfo->hasResult = DATA_SET_FLAG;
    pInfo->win.skey = pCtx->start.key;
    notNullElems++;
    i += 1;
  } else if (pInfo->lastKey == INT64_MIN) {
    pInfo->lastKey = primaryKey[index];
    GET_TYPED_DATA(pInfo->lastValue, double, pCtx->inputType, GET_INPUT_CHAR_INDEX(pCtx, index));
  } else if (pInfo->p.key == INT64_MIN) {
    pInfo->p.key = primaryKey[index];
    GET_TYPED_DATA(pInfo->p.val, double, pCtx->inputType, GET_INPUT_CHAR_INDEX(pCtx, index));
    pInfo->hasResult = DATA_SET_FLAG;
    pInfo->win.skey = pInfo->lastKey;
    pInfo->win.skey = pInfo->p.key;
    notNullElems++;
    i += 1;
  }
...
@@ -3854,9 +3865,9 @@ static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) {
        continue;
      }
      pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + index] - pInfo->lastKey);
      pInfo->lastValue = val[i];
      pInfo->lastKey = primaryKey[i + index];
      SPoint1 st = {.key = primaryKey[i + index], .val = val[i]};
      pInfo->dOutput += twa_get_area(pInfo->p, st);
      pInfo->p = st;
    }
    break;
  }
...
@@ -3867,9 +3878,9 @@ static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) {
        continue;
      }
      pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + index] - pInfo->lastKey);
      pInfo->lastValue = val[i];
      pInfo->lastKey = primaryKey[i + index];
      SPoint1 st = {.key = primaryKey[i + index], .val = val[i]};
      pInfo->dOutput += twa_get_area(pInfo->p, st);
      pInfo->p = st;
    }
    break;
  }
...
@@ -3880,9 +3891,9 @@ static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) {
        continue;
      }
      pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + index] - pInfo->lastKey);
      pInfo->lastValue = val[i];
      pInfo->lastKey = primaryKey[i + index];
      SPoint1 st = {.key = primaryKey[i + index], .val = val[i]};
      pInfo->dOutput += twa_get_area(pInfo->p, st);
      pInfo->p = st;
    }
    break;
  }
...
@@ -3893,9 +3904,9 @@ static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) {
        continue;
      }
      pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + index] - pInfo->lastKey);
      pInfo->lastValue = (double) val[i];
      pInfo->lastKey = primaryKey[i + index];
      SPoint1 st = {.key = primaryKey[i + index], .val = (double) val[i]};
      pInfo->dOutput += twa_get_area(pInfo->p, st);
      pInfo->p = st;
    }
    break;
  }
...
@@ -3906,9 +3917,9 @@ static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) {
        continue;
      }
      pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + index] - pInfo->lastKey);
      pInfo->lastValue = val[i];
      pInfo->lastKey = primaryKey[i + index];
      SPoint1 st = {.key = primaryKey[i + index], .val = val[i]};
      pInfo->dOutput += twa_get_area(pInfo->p, st);  //((val[i] + pInfo->p.val) / 2) * (primaryKey[i + index] - pInfo->p.key);
      pInfo->p = st;
    }
    break;
  }
...
@@ -3919,9 +3930,9 @@ static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) {
        continue;
      }
      pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + index] - pInfo->lastKey);
      pInfo->lastValue = val[i];
      pInfo->lastKey = primaryKey[i + index];
      SPoint1 st = {.key = primaryKey[i + index], .val = val[i]};
      pInfo->dOutput += twa_get_area(pInfo->p, st);  //((val[i] + pInfo->p.val) / 2) * (primaryKey[i + index] - pInfo->p.key);
      pInfo->p = st;
    }
    break;
  }
...
@@ -3930,12 +3941,11 @@ static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) {
  // the last interpolated time window value
  if (pCtx->end.key != INT64_MIN) {
    pInfo->dOutput += ((pInfo->lastValue + pCtx->end.val) / 2) * (pCtx->end.key - pInfo->lastKey);
    pInfo->lastValue = pCtx->end.val;
    pInfo->lastKey = pCtx->end.key;
    pInfo->dOutput += twa_get_area(pInfo->p, pCtx->end);  //((pInfo->p.val + pCtx->end.val) / 2) * (pCtx->end.key - pInfo->p.key);
    pInfo->p = pCtx->end;
  }
  pInfo->win.ekey = pInfo->lastKey;
  pInfo->win.ekey = pInfo->p.key;
  SET_VAL(pCtx, notNullElems, 1);
...
@@ -3966,7 +3976,7 @@ static void twa_func_merge(SQLFunctionCtx *pCtx) {
    pBuf->dOutput += pInput->dOutput;
    pBuf->win = pInput->win;
    pBuf->lastKey = pInput->lastKey;
    pBuf->p = pInput->p;
  }
  SET_VAL(pCtx, numOfNotNull, 1);
...
@@ -3993,15 +4003,14 @@ void twa_function_finalizer(SQLFunctionCtx *pCtx) {
  SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
  STwaInfo *pInfo = (STwaInfo *)GET_ROWCELL_INTERBUF(pResInfo);
  assert(pInfo->win.ekey == pInfo->lastKey && pInfo->hasResult == pResInfo->hasResult);
  if (pInfo->hasResult != DATA_SET_FLAG) {
    setNull(pCtx->aOutputBuf, TSDB_DATA_TYPE_DOUBLE, sizeof(double));
    return;
  }
  assert(pInfo->win.ekey == pInfo->p.key && pInfo->hasResult == pResInfo->hasResult);
  if (pInfo->win.ekey == pInfo->win.skey) {
    *(double *)pCtx->aOutputBuf = pInfo->lastValue;
    *(double *)pCtx->aOutputBuf = pInfo->p.val;
  } else {
    *(double *)pCtx->aOutputBuf = pInfo->dOutput / (pInfo->win.ekey - pInfo->win.skey);
  }
...
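A quick sanity check on the new twa_get_area() helper above (an illustrative calculation, not part of the commit): when both endpoints have the same sign it returns the plain trapezoid area (s.val + e.val) * (e.key - s.key) / 2; when the signs differ, the segment crosses zero at x = (s.key * e.val - e.key * s.val) / (e.val - s.val) and the two triangles contribute (s.val * (x - s.key) + e.val * (e.key - x)) / 2. For s = (key 0, val -1) and e = (key 2, val 3), the crossing is x = (0*3 - 2*(-1)) / (3 - (-1)) = 0.5 and the signed area is (-1*(0.5 - 0) + 3*(2 - 0.5)) / 2 = 2.0, which matches integrating the line segment directly.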
src/client/src/tscParseInsert.c
...
@@ -1446,18 +1446,21 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
  int32_t count = 0;
  int32_t maxRows = 0;
  tscDestroyBlockArrayList(pSql->cmd.pDataBlocks);
  pCmd->pDataBlocks = taosArrayInit(1, POINTER_BYTES);
  tfree(pCmd->pTableMetaList);
  pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
  if (pCmd->pTableBlockHashList == NULL) {
    pCmd->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
  }
  STableDataBlocks *pTableDataBlock = NULL;
  int32_t ret = tscCreateDataBlock(TSDB_PAYLOAD_SIZE, tinfo.rowSize, sizeof(SSubmitBlk), pTableMetaInfo->name, pTableMeta, &pTableDataBlock);
  int32_t ret = tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk), tinfo.rowSize, pTableMetaInfo->name, pTableMeta, &pTableDataBlock, NULL);
  if (ret != TSDB_CODE_SUCCESS) {
    // return ret;
  }
  taosArrayPush(pCmd->pDataBlocks, &pTableDataBlock);
  tscAllocateMemIfNeed(pTableDataBlock, tinfo.rowSize, &maxRows);
  char *tokenBuf = calloc(1, 4096);
  while ((readLen = tgetline(&line, &n, fp)) != -1) {
...
@@ -1519,8 +1522,6 @@ void tscProcessMultiVnodesImportFromFile(SSqlObj *pSql) {
  SImportFileSupport *pSupporter = calloc(1, sizeof(SImportFileSupport));
  SSqlObj *pNew = createSubqueryObj(pSql, 0, parseFileSendDataBlock, pSupporter, TSDB_SQL_INSERT, NULL);
  pNew->cmd.pDataBlocks = taosArrayInit(4, POINTER_BYTES);
  pCmd->count = 1;
  FILE *fp = fopen(pCmd->payload, "r");
...
src/client/src/tscSQLParser.c
...
@@ -5913,12 +5913,16 @@ int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ
  if (pExprList->nExpr != 1) {
    return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
  }
  bool server_status = false;
  tSQLExpr* pExpr = pExprList->a[0].pNode;
  if (pExpr->operand.z == NULL) {
    //handle 'select 1'
    if (pExpr->token.n == 1 && 0 == strncasecmp(pExpr->token.z, "1", 1)) {
      server_status = true;
    } else {
      return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
    }
  }
  // TODO redefine the function
  SDNodeDynConfOption functionsInfo[5] = {{"database()", 10},
                                          {"server_version()", 16},
...
@@ -5927,6 +5931,9 @@ int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ
                                          {"current_user()", 14}};
  int32_t index = -1;
  if (server_status == true) {
    index = 2;
  } else {
    for (int32_t i = 0; i < tListLen(functionsInfo); ++i) {
      if (strncasecmp(functionsInfo[i].name, pExpr->operand.z, functionsInfo[i].len) == 0 &&
          functionsInfo[i].len == pExpr->operand.n) {
...
@@ -5934,6 +5941,7 @@ int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ
        break;
      }
    }
  }
  switch (index) {
    case 0:
...
src/client/src/tscServer.c
...
@@ -152,8 +152,14 @@ void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) {
  SRpcEpSet *epSet = &pRsp->epSet;
  if (epSet->numOfEps > 0) {
    tscEpSetHtons(epSet);
    if (!tscEpSetIsEqual(&pSql->pTscObj->tscCorMgmtEpSet->epSet, epSet)) {
      tscTrace("%p updating epset: numOfEps: %d, inUse: %d", pSql, epSet->numOfEps, epSet->inUse);
      for (int8_t i = 0; i < epSet->numOfEps; i++) {
        tscTrace("endpoint %d: fqdn = %s, port=%d", i, epSet->fqdn[i], epSet->port[i]);
      }
      tscUpdateMgmtEpSet(pSql, epSet);
    }
  }
  pSql->pTscObj->connId = htonl(pRsp->connId);
...
@@ -208,7 +214,7 @@ int tscSendMsgToServer(SSqlObj *pSql) {
  STscObj* pObj = pSql->pTscObj;
  SSqlCmd* pCmd = &pSql->cmd;
  char *pMsg = rpcMallocCont(pCmd->payloadLen);
  char *pMsg = rpcMallocCont(sizeof(SMsgVersion) + pCmd->payloadLen);
  if (NULL == pMsg) {
    tscError("%p msg:%s malloc failed", pSql, taosMsg[pSql->cmd.msgType]);
    return TSDB_CODE_TSC_OUT_OF_MEMORY;
...
@@ -219,12 +225,13 @@ int tscSendMsgToServer(SSqlObj *pSql) {
    tscDumpMgmtEpSet(pSql);
  }
  memcpy(pMsg, pSql->cmd.payload, pSql->cmd.payloadLen);
  tstrncpy(pMsg, version, sizeof(SMsgVersion));
  memcpy(pMsg + sizeof(SMsgVersion), pSql->cmd.payload, pSql->cmd.payloadLen);
  SRpcMsg rpcMsg = {
      .msgType = pSql->cmd.msgType,
      .pCont   = pMsg,
      .contLen = pSql->cmd.payloadLen,
      .contLen = pSql->cmd.payloadLen + sizeof(SMsgVersion),
      .ahandle = (void*)pSql->self,
      .handle  = NULL,
      .code    = 0
...
src/client/src/tscSubquery.c
...
@@ -2172,6 +2172,15 @@ static bool needRetryInsert(SSqlObj* pParentObj, int32_t numOfSub) {
  return true;
}
static void doFreeInsertSupporter(SSqlObj* pSqlObj) {
  assert(pSqlObj != NULL && pSqlObj->subState.numOfSub > 0);
  for (int32_t i = 0; i < pSqlObj->subState.numOfSub; ++i) {
    SSqlObj* pSql = pSqlObj->pSubs[i];
    tfree(pSql->param);
  }
}
static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows) {
  SInsertSupporter *pSupporter = (SInsertSupporter *)param;
  SSqlObj* pParentObj = pSupporter->pSql;
...
@@ -2203,10 +2212,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
  if (pParentObj->res.code == TSDB_CODE_SUCCESS) {
    tscDebug("%p Async insertion completed, total inserted:%d", pParentObj, pParentObj->res.numOfRows);
    for (int32_t i = 0; i < numOfSub; ++i) {
      SSqlObj* pSql = pParentObj->pSubs[i];
      tfree(pSql->param);
    }
    doFreeInsertSupporter(pParentObj);
    // todo remove this parameter in async callback function definition.
    // all data has been sent to vnode, call user function
...
@@ -2214,6 +2220,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
    (*pParentObj->fp)(pParentObj->param, pParentObj, v);
  } else {
    if (!needRetryInsert(pParentObj, numOfSub)) {
      doFreeInsertSupporter(pParentObj);
      tscQueueAsyncRes(pParentObj);
      return;
    }
...
@@ -2244,16 +2251,19 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
    pParentObj->cmd.parseFinished = false;
    pParentObj->subState.numOfRemain = numOfFailed;
    pParentObj->subState.numOfSub    = numOfFailed;
    tscResetSqlCmdObj(&pParentObj->cmd, false);
    // in case of insert, redo parsing the sql string and build new submit data block for two reasons:
    // 1. the table Id(tid & uid) may have been update, the submit block needs to be updated accordingly.
    // 2. vnode may need the schema information along with submit block to update its local table schema.
    tscDebug("%p re-parse sql to generate submit data, retry:%d", pParentObj, pParentObj->retry++);
    int32_t code = tsParseSql(pParentObj, true);
    if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) return;
    if (code != TSDB_CODE_SUCCESS) {
      pParentObj->res.code = code;
      doFreeInsertSupporter(pParentObj);
      tscQueueAsyncRes(pParentObj);
      return;
    }
...
src/client/src/tscUtil.c
...
@@ -2044,7 +2044,11 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
  pnCmd->numOfClause = 0;
  pnCmd->clauseIndex = 0;
  pnCmd->pDataBlocks = NULL;
  pnCmd->numOfTables = 0;
  pnCmd->parseFinished = 1;
  pnCmd->pTableMetaList = NULL;
  pnCmd->pTableBlockHashList = NULL;
  if (tscAddSubqueryInfo(pnCmd) != TSDB_CODE_SUCCESS) {
    terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
...
src/common/inc/tglobal.h
...
@@ -46,7 +46,7 @@ extern int32_t tsShellActivityTimer;
extern uint32_t tsMaxTmrCtrl;
extern float    tsNumOfThreadsPerCore;
extern int32_t  tsNumOfCommitThreads;
extern float    tsRatioOfQueryThreads;  // todo remove it
extern float    tsRatioOfQueryCores;
extern int8_t   tsDaylight;
extern char     tsTimezone[];
extern char     tsLocale[];
...
@@ -57,7 +57,7 @@ extern char tsTempDir[];
//query buffer management
extern int32_t tsQueryBufferSize;        // maximum allowed usage buffer for each data node during query processing
extern int32_t tsHalfCoresForQuery;      // only 50% will be used in query processing
extern int32_t tsRetrieveBlockingModel;  // only 50% will be used in query processing
// client
extern int32_t tsTableMetaKeepTimer;
...
src/common/src/tglobal.c
...
@@ -52,7 +52,7 @@ int32_t tsMaxConnections = 5000;
int32_t tsShellActivityTimer  = 3;  // second
float   tsNumOfThreadsPerCore = 1.0f;
int32_t tsNumOfCommitThreads = 1;
float   tsRatioOfQueryThreads = 0.5f;
float   tsRatioOfQueryCores = 1.0f;
int8_t  tsDaylight = 0;
char    tsTimezone[TSDB_TIMEZONE_LEN] = {0};
char    tsLocale[TSDB_LOCALE_LEN] = {0};
...
@@ -107,8 +107,8 @@ int64_t tsMaxRetentWindow = 24 * 3600L;  // maximum time window tolerance
// positive value (in MB)
int32_t tsQueryBufferSize = -1;
// only 50% cpu will be used in query processing in dnode
int32_t tsHalfCoresForQuery = 0;
// in retrieve blocking model, the retrieve threads will wait for the completion of the query processing.
int32_t tsRetrieveBlockingModel = 0;
// db parameters
int32_t tsCacheBlockSize = TSDB_DEFAULT_CACHE_BLOCK_SIZE;
...
@@ -206,7 +206,7 @@ int32_t tsNumOfLogLines = 10000000;
int32_t mDebugFlag = 131;
int32_t sdbDebugFlag = 131;
int32_t dDebugFlag = 135;
int32_t vDebugFlag = 131;
int32_t vDebugFlag = 135;
int32_t cDebugFlag = 131;
int32_t jniDebugFlag = 131;
int32_t odbcDebugFlag = 131;
...
@@ -444,12 +444,12 @@ static void doInitGlobalConfig(void) {
  cfg.unitType = TAOS_CFG_UTYPE_NONE;
  taosInitConfigOption(cfg);
  cfg.option = "ratioOfQueryThreads";
  cfg.ptr = &tsRatioOfQueryThreads;
  cfg.option = "ratioOfQueryCores";
  cfg.ptr = &tsRatioOfQueryCores;
  cfg.valType = TAOS_CFG_VTYPE_FLOAT;
  cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
  cfg.minValue = 0.1f;
  cfg.maxValue = 0.9f;
  cfg.minValue = 0.0f;
  cfg.maxValue = 2.0f;
  cfg.ptrLength = 0;
  cfg.unitType = TAOS_CFG_UTYPE_NONE;
  taosInitConfigOption(cfg);
...
@@ -887,8 +887,8 @@ static void doInitGlobalConfig(void) {
  cfg.unitType = TAOS_CFG_UTYPE_BYTE;
  taosInitConfigOption(cfg);
  cfg.option = "halfCoresForQuery";
  cfg.ptr = &tsHalfCoresForQuery;
  cfg.option = "retrieveBlockingModel";
  cfg.ptr = &tsRetrieveBlockingModel;
  cfg.valType = TAOS_CFG_VTYPE_INT32;
  cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
  cfg.minValue = 0;
...
src/cq/src/cqMain.c
...
@@ -161,7 +161,7 @@ void cqStop(void *handle) {
    return;
  }
  SCqContext *pContext = handle;
  cInfo("vgId:%d, stop all CQs", pContext->vgId);
  cDebug("vgId:%d, stop all CQs", pContext->vgId);
  if (pContext->dbConn == NULL || pContext->master == 0) return;
  pthread_mutex_lock(&pContext->mutex);
...
src/dnode/src/dnodeEps.c
...
@@ -130,7 +130,7 @@ static void dnodePrintEps(SDnodeEps *eps) {
  dDebug("print dnodeEp, dnodeNum:%d", eps->dnodeNum);
  for (int32_t i = 0; i < eps->dnodeNum; i++) {
    SDnodeEp *ep = &eps->dnodeEps[i];
    dDebug("dnodeId:%d, dnodeFqdn:%s dnodePort:%u", ep->dnodeId, ep->dnodeFqdn, ep->dnodePort);
    dDebug("dnode:%d, dnodeFqdn:%s dnodePort:%u", ep->dnodeId, ep->dnodeFqdn, ep->dnodePort);
  }
}
...
src/dnode/src/dnodeMRead.c
...
@@ -124,8 +124,6 @@ void dnodeDispatchToMReadQueue(SRpcMsg *pMsg) {
    SMnodeMsg *pRead = mnodeCreateMsg(pMsg);
    taosWriteQitem(tsMReadQueue, TAOS_QTYPE_RPC, pRead);
  }
  rpcFreeCont(pMsg->pCont);
}
static void dnodeFreeMReadMsg(SMnodeMsg *pRead) {
...
src/dnode/src/dnodeMWrite.c
...
@@ -125,8 +125,6 @@ void dnodeDispatchToMWriteQueue(SRpcMsg *pMsg) {
           taosMsg[pWrite->rpcMsg.msgType], tsMWriteQueue);
    taosWriteQitem(tsMWriteQueue, TAOS_QTYPE_RPC, pWrite);
  }
  rpcFreeCont(pMsg->pCont);
}
static void dnodeFreeMWriteMsg(SMnodeMsg *pWrite) {
...
src/dnode/src/dnodeShell.c
...
@@ -70,8 +70,7 @@ int32_t dnodeInitShell() {
  dnodeProcessShellMsgFp[TSDB_MSG_TYPE_NETWORK_TEST] = dnodeSendStartupStep;
  int32_t numOfThreads = tsNumOfCores * tsNumOfThreadsPerCore;
  numOfThreads = (int32_t) ((1.0 - tsRatioOfQueryThreads) * numOfThreads / 2.0);
  int32_t numOfThreads = (tsNumOfCores * tsNumOfThreadsPerCore) / 2.0;
  if (numOfThreads < 1) {
    numOfThreads = 1;
  }
...
@@ -128,7 +127,20 @@ static void dnodeProcessMsgFromShell(SRpcMsg *pMsg, SRpcEpSet *pEpSet) {
  } else {}
  if (dnodeProcessShellMsgFp[pMsg->msgType]) {
    SMsgVersion *pMsgVersion = pMsg->pCont;
    if (taosCheckVersion(pMsgVersion->clientVersion, version, 3) != TSDB_CODE_SUCCESS) {
      rpcMsg.code = TSDB_CODE_TSC_INVALID_VERSION;
      rpcSendResponse(&rpcMsg);
      rpcFreeCont(pMsg->pCont);
      return; // todo change the error code
    }
    pMsg->pCont += sizeof(*pMsgVersion);
    pMsg->contLen -= sizeof(*pMsgVersion);
    (*dnodeProcessShellMsgFp[pMsg->msgType])(pMsg);
    //pMsg->contLen += sizeof(*pMsgVersion);
    rpcFreeCont(pMsg->pCont - sizeof(*pMsgVersion));
  } else {
    dError("RPC %p, shell msg:%s is not processed", pMsg->handle, taosMsg[pMsg->msgType]);
    rpcMsg.code = TSDB_CODE_DND_MSG_NOT_PROCESSED;
...
src/dnode/src/dnodeSystem.c
...
@@ -16,12 +16,15 @@
#define _DEFAULT_SOURCE
#include "os.h"
#include "tgrant.h"
#include "tconfig.h"
#include "dnodeMain.h"
static void signal_handler(int32_t signum, siginfo_t *sigInfo, void *context);
static tsem_t exitSem;
int32_t main(int32_t argc, char *argv[]) {
  int dump_config = 0;
  // Set global configuration file
  for (int32_t i = 1; i < argc; ++i) {
    if (strcmp(argv[i], "-c") == 0) {
...
@@ -35,6 +38,8 @@ int32_t main(int32_t argc, char *argv[]) {
        printf("'-c' requires a parameter, default:%s\n", configDir);
        exit(EXIT_FAILURE);
      }
    } else if (strcmp(argv[i], "-C") == 0) {
      dump_config = 1;
    } else if (strcmp(argv[i], "-V") == 0) {
#ifdef _ACCT
      char *versionStr = "enterprise";
...
@@ -87,6 +92,20 @@ int32_t main(int32_t argc, char *argv[]) {
#endif
  }
  if (0 != dump_config) {
    tscEmbedded = 1;
    taosInitGlobalCfg();
    taosReadGlobalLogCfg();
    if (!taosReadGlobalCfg()) {
      printf("TDengine read global config failed");
      exit(EXIT_FAILURE);
    }
    taosDumpGlobalCfg();
    exit(EXIT_SUCCESS);
  }
  if (tsem_init(&exitSem, 0, 0) != 0) {
    printf("failed to create exit semphore\n");
    exit(EXIT_FAILURE);
...
src/dnode/src/dnodeVMgmt.c
...
@@ -198,7 +198,7 @@ static int32_t dnodeProcessCreateMnodeMsg(SRpcMsg *pMsg) {
  SCreateMnodeMsg *pCfg = pMsg->pCont;
  pCfg->dnodeId = htonl(pCfg->dnodeId);
  if (pCfg->dnodeId != dnodeGetDnodeId()) {
    dDebug("dnodeId:%d, in create mnode msg is not equal with saved dnodeId:%d", pCfg->dnodeId, dnodeGetDnodeId());
    dDebug("dnode:%d, in create mnode msg is not equal with saved dnodeId:%d", pCfg->dnodeId, dnodeGetDnodeId());
    return TSDB_CODE_MND_DNODE_ID_NOT_CONFIGURED;
  }
...
@@ -207,7 +207,7 @@ static int32_t dnodeProcessCreateMnodeMsg(SRpcMsg *pMsg) {
    return TSDB_CODE_MND_DNODE_EP_NOT_CONFIGURED;
  }
  dDebug("dnodeId:%d, create mnode msg is received from mnodes, numOfMnodes:%d", pCfg->dnodeId, pCfg->mnodes.mnodeNum);
  dDebug("dnode:%d, create mnode msg is received from mnodes, numOfMnodes:%d", pCfg->dnodeId, pCfg->mnodes.mnodeNum);
  for (int i = 0; i < pCfg->mnodes.mnodeNum; ++i) {
    pCfg->mnodes.mnodeInfos[i].mnodeId = htonl(pCfg->mnodes.mnodeInfos[i].mnodeId);
    dDebug("mnode index:%d, mnode:%d:%s", i, pCfg->mnodes.mnodeInfos[i].mnodeId, pCfg->mnodes.mnodeInfos[i].mnodeEp);
...
src/dnode/src/dnodeVRead.c
...
@@ -26,16 +26,20 @@ static SWorkerPool tsVQueryWP;
static SWorkerPool tsVFetchWP;
int32_t dnodeInitVRead() {
  const int32_t maxFetchThreads = 4;
  // calculate the available query thread
  float threadsForQuery = MAX(tsNumOfCores * tsRatioOfQueryCores, 1);
  tsVQueryWP.name = "vquery";
  tsVQueryWP.workerFp = dnodeProcessReadQueue;
  tsVQueryWP.min = tsNumOfCores;
  tsVQueryWP.max = tsNumOfCores /* * tsNumOfThreadsPerCore*/;
  // if (tsVQueryWP.max <= tsVQueryWP.min * 2) tsVQueryWP.max = 2 * tsVQueryWP.min;
  tsVQueryWP.min = (int32_t) threadsForQuery;
  tsVQueryWP.max = tsVQueryWP.min;
  if (tWorkerInit(&tsVQueryWP) != 0) return -1;
  tsVFetchWP.name = "vfetch";
  tsVFetchWP.workerFp = dnodeProcessReadQueue;
  tsVFetchWP.min = MIN(4, tsNumOfCores);
  tsVFetchWP.min = MIN(maxFetchThreads, tsNumOfCores);
  tsVFetchWP.max = tsVFetchWP.min;
  if (tWorkerInit(&tsVFetchWP) != 0) return -1;
...
@@ -73,8 +77,6 @@ void dnodeDispatchToVReadQueue(SRpcMsg *pMsg) {
    SRpcMsg rpcRsp = {.handle = pMsg->handle, .code = TSDB_CODE_VND_INVALID_VGROUP_ID};
    rpcSendResponse(&rpcRsp);
  }
  rpcFreeCont(pMsg->pCont);
}
void *dnodeAllocVQueryQueue(void *pVnode) {
...
src/dnode/src/dnodeVWrite.c
...
@@ -102,7 +102,6 @@ void dnodeDispatchToVWriteQueue(SRpcMsg *pRpcMsg) {
  }
  vnodeRelease(pVnode);
  rpcFreeCont(pRpcMsg->pCont);
}
void *dnodeAllocVWriteQueue(void *pVnode) {
...
src/inc/taoserror.h
...
@@ -206,9 +206,10 @@ TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR, 0, 0x0507, "Missing da
TAOS_DEFINE_ERROR(TSDB_CODE_VND_OUT_OF_MEMORY,        0, 0x0508, "Out of memory")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_APP_ERROR,            0, 0x0509, "Unexpected generic error in vnode")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_VRESION_FILE, 0, 0x050A, "Invalid version file")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FULL,              0, 0x050B, "Vnode memory is full because commit failed")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FULL,              0, 0x050B, "Database memory is full for commit failed")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FLOWCTRL,          0, 0x050C, "Database memory is full for waiting commit")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_NOT_SYNCED,           0, 0x0511, "Database suspended")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH,        0, 0x0512, "Write operation denied")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH,        0, 0x0512, "Database write operation denied")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_SYNCING,              0, 0x0513, "Database is syncing")
// tsdb
...
src/inc/taosmsg.h
...
@@ -198,6 +198,10 @@ typedef struct {
  int32_t numOfVnodes;
} SMsgDesc;
typedef struct SMsgVersion {
  char clientVersion[TSDB_VERSION_LEN];
} SMsgVersion;
typedef struct SMsgHead {
  int32_t contLen;
  int32_t vgId;
...
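Taken together with the tscServer.c and dnodeShell.c hunks above, the new SMsgVersion struct is simply prepended to every shell message: the client copies its version string in front of the payload, and the dnode validates it with taosCheckVersion() before advancing pCont past the header. A minimal standalone sketch of that framing follows; TSDB_VERSION_LEN is assumed to be 12 here purely for the demo, and the real path goes through rpcMallocCont/rpcSendRequest rather than malloc.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TSDB_VERSION_LEN 12   /* assumption for this sketch; the real value comes from taosdef.h */

typedef struct SMsgVersion {
  char clientVersion[TSDB_VERSION_LEN];
} SMsgVersion;

int main(void) {
  const char *version = "2.0.7.0";
  const char *payload = "show databases";           /* stands in for pCmd->payload */
  size_t payloadLen = strlen(payload) + 1;

  /* client side: version header first, then the original request body */
  char *msg = malloc(sizeof(SMsgVersion) + payloadLen);
  strncpy(msg, version, sizeof(SMsgVersion));
  memcpy(msg + sizeof(SMsgVersion), payload, payloadLen);

  /* server side: read the header, then skip past it to the real payload */
  SMsgVersion *hdr = (SMsgVersion *)msg;
  printf("client version: %s, payload: %s\n", hdr->clientVersion, msg + sizeof(SMsgVersion));

  free(msg);
  return 0;
}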
src/kit/shell/inc/shell.h
...
@@ -45,6 +45,7 @@ typedef struct SShellArguments {
  char* timezone;
  bool is_raw_time;
  bool is_use_passwd;
  bool dump_config;
  char file[TSDB_FILENAME_LEN];
  char dir[TSDB_FILENAME_LEN];
  int threadNum;
...
src/kit/shell/src/shellEngine.c
...
@@ -509,7 +509,9 @@ static int dumpResultToFile(const char* fname, TAOS_RES* tres) {
static void shellPrintNChar(const char *str, int length, int width) {
  int pos = 0, cols = 0;
  wchar_t tail[3];
  int pos = 0, cols = 0, totalCols = 0, tailLen = 0;
  while (pos < length) {
    wchar_t wc;
    int bytes = mbtowc(&wc, str + pos, MB_CUR_MAX);
...
@@ -526,13 +528,42 @@ static void shellPrintNChar(const char *str, int length, int width) {
#else
    int w = wcwidth(wc);
#endif
    if (w > 0) {
      if (width > 0 && cols + w > width) {
    if (w <= 0) {
      continue;
    }
    if (width <= 0) {
      printf("%lc", wc);
      continue;
    }
    totalCols += w;
    if (totalCols > width) {
      break;
    }
    if (totalCols <= (width - 3)) {
      printf("%lc", wc);
      cols += w;
    } else {
      tail[tailLen] = wc;
      tailLen++;
    }
  }
  if (totalCols > width) {
    // width could be 1 or 2, so printf("...") cannot be used
    for (int i = 0; i < 3; i++) {
      if (cols >= width) {
        break;
      }
      putchar('.');
      ++cols;
    }
  } else {
    for (int i = 0; i < tailLen; i++) {
      printf("%lc", tail[i]);
    }
    cols = totalCols;
  }
  for (; cols < width; cols++) {
...
@@ -656,13 +687,21 @@ static int calcColWidth(TAOS_FIELD* field, int precision) {
      return MAX(25, width);
    case TSDB_DATA_TYPE_BINARY:
    case TSDB_DATA_TYPE_NCHAR:
      if (field->bytes > tsMaxBinaryDisplayWidth) {
        return MAX(tsMaxBinaryDisplayWidth, width);
      } else {
        return MAX(field->bytes, width);
      }
    case TSDB_DATA_TYPE_NCHAR: {
      int16_t bytes = field->bytes * TSDB_NCHAR_SIZE;
      if (bytes > tsMaxBinaryDisplayWidth) {
        return MAX(tsMaxBinaryDisplayWidth, width);
      } else {
        return MAX(bytes, width);
      }
    }
    case TSDB_DATA_TYPE_TIMESTAMP:
      if (args.is_raw_time) {
        return MAX(14, width);
...
src/kit/shell/src/shellLinux.c
View file @ cf4f0d95
...
...
@@ -39,6 +39,7 @@ static struct argp_option options[] = {
  {"user",        'u', "USER",       0, "The user name to use when connecting to the server."},
  {"user",        'A', "Auth",       0, "The user auth to use when connecting to the server."},
  {"config-dir",  'c', "CONFIG_DIR", 0, "Configuration directory."},
  {"dump-config", 'C', 0,            0, "Dump configuration."},
  {"commands",    's', "COMMANDS",   0, "Commands to run without enter the shell."},
  {"raw-time",    'r', 0,            0, "Output time as uint64_t."},
  {"file",        'f', "FILE",       0, "Script to run without enter the shell."},
...
...
@@ -96,6 +97,9 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
      tstrncpy(configDir, full_path.we_wordv[0], TSDB_FILENAME_LEN);
      wordfree(&full_path);
      break;
    case 'C':
      arguments->dump_config = true;
      break;
    case 's':
      arguments->commands = arg;
      break;
...
...
src/kit/shell/src/shellMain.c
View file @ cf4f0d95
...
...
@@ -15,6 +15,7 @@
#include "os.h"
#include "shell.h"
#include "tconfig.h"
#include "tnettest.h"

pthread_t pid;
...
...
@@ -58,6 +59,7 @@ SShellArguments args = {
  .timezone      = NULL,
  .is_raw_time   = false,
  .is_use_passwd = false,
  .dump_config   = false,
  .file          = "\0",
  .dir           = "\0",
  .threadNum     = 5,
...
...
@@ -78,6 +80,19 @@ int main(int argc, char* argv[]) {
  shellParseArgument(argc, argv, &args);

  if (args.dump_config) {
    taosInitGlobalCfg();
    taosReadGlobalLogCfg();

    if (!taosReadGlobalCfg()) {
      printf("TDengine read global config failed");
      exit(EXIT_FAILURE);
    }
    taosDumpGlobalCfg();
    exit(0);
  }

  if (args.netTestRole && args.netTestRole[0] != 0) {
    taos_init();
    taosNetTest(args.netTestRole, args.host, args.port, args.pktLen);
...
...
src/kit/shell/src/shellWindows.c
View file @ cf4f0d95
...
...
@@ -35,6 +35,8 @@ void printHelp() {
  printf("%s%s%s\n", indent, indent, "The user auth to use when connecting to the server.");
  printf("%s%s\n", indent, "-c");
  printf("%s%s%s\n", indent, indent, "Configuration directory.");
  printf("%s%s\n", indent, "-C");
  printf("%s%s%s\n", indent, indent, "Dump configuration.");
  printf("%s%s\n", indent, "-s");
  printf("%s%s%s\n", indent, indent, "Commands to run without enter the shell.");
  printf("%s%s\n", indent, "-r");
...
...
@@ -104,6 +106,8 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) {
        fprintf(stderr, "Option -c requires an argument\n");
        exit(EXIT_FAILURE);
      }
    } else if (strcmp(argv[i], "-C") == 0) {
      arguments->dump_config = true;
    } else if (strcmp(argv[i], "-s") == 0) {
      if (i < argc - 1) {
        arguments->commands = argv[++i];
...
src/kit/taosdump/taosdump.c
View file @ cf4f0d95
...
...
@@ -14,6 +14,9 @@
 */
#include <iconv.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include "os.h"
#include "taos.h"
#include "taosdef.h"
...
...
@@ -366,6 +369,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
static struct argp argp = {options, parse_opt, args_doc, doc};
static resultStatistics g_resultStatistics = {0};
static FILE *g_fpOfResult = NULL;
static int g_numOfCores = 1;

int taosDumpOut(struct arguments *arguments);
int taosDumpIn(struct arguments *arguments);
...
...
@@ -378,7 +382,7 @@ int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FI
int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon, char* dbName);
int taosCheckParam(struct arguments *arguments);
void taosFreeDbInfos();
static void taosStartDumpOutWorkThreads(struct arguments* args, int32_t numOfThread, char *dbName);
static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, int32_t numOfThread, char *dbName);

struct arguments tsArguments = {
  // connection option
...
...
@@ -540,6 +544,8 @@ int main(int argc, char *argv[]) {
    }
  }

  g_numOfCores = (int32_t)sysconf(_SC_NPROCESSORS_ONLN);

  time_t tTime = time(NULL);
  struct tm tm = *localtime(&tTime);
...
...
@@ -692,65 +698,98 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu
  sprintf(tmpCommand, "select tbname from %s", metric);

  TAOS_RES *result = taos_query(taosCon, tmpCommand);
  int32_t code = taos_errno(result);
  TAOS_RES *res = taos_query(taosCon, tmpCommand);
  int32_t code = taos_errno(res);
  if (code != 0) {
    fprintf(stderr, "failed to run command %s\n", tmpCommand);
    free(tmpCommand);
    taos_free_result(result);
    taos_free_result(res);
    return -1;
  }
  free(tmpCommand);

  TAOS_FIELD *fields = taos_fetch_fields(result);
  int32_t numOfTable  = 0;
  int32_t numOfThread = *totalNumOfThread;
  char tmpFileName[TSDB_FILENAME_LEN + 1];
  while ((row = taos_fetch_row(result)) != NULL) {
    if (0 == numOfTable) {
      memset(tmpFileName, 0, TSDB_FILENAME_LEN);
      sprintf(tmpFileName, ".tables.tmp.%d", numOfThread);
      fd = open(tmpFileName, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
  char tmpBuf[TSDB_FILENAME_LEN + 1];
  memset(tmpBuf, 0, TSDB_FILENAME_LEN);
  sprintf(tmpBuf, ".select-tbname.tmp");
  fd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
  if (fd == -1) {
    fprintf(stderr, "failed to open temp file: %s\n", tmpFileName);
    taos_free_result(result);
    for (int32_t loopCnt = 0; loopCnt < numOfThread; loopCnt++) {
      sprintf(tmpFileName, ".tables.tmp.%d", loopCnt);
      (void)remove(tmpFileName);
    }
    free(tmpCommand);
    fprintf(stderr, "failed to open temp file: %s\n", tmpBuf);
    taos_free_result(res);
    return -1;
  }
  numOfThread++;
  }

  TAOS_FIELD *fields = taos_fetch_fields(res);
  int32_t numOfTable = 0;
  while ((row = taos_fetch_row(res)) != NULL) {
    memset(&tableRecord, 0, sizeof(STableRecord));
    tstrncpy(tableRecord.name, (char *)row[0], fields[0].bytes);
    tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN);
    taosWrite(fd, &tableRecord, sizeof(STableRecord));
    numOfTable++;
  }
  taos_free_result(res);
  lseek(fd, 0, SEEK_SET);

  if (numOfTable >= arguments->table_batch) {
    numOfTable = 0;
  int maxThreads = arguments->thread_num;
  int tableOfPerFile;
  if (numOfTable <= arguments->thread_num) {
    tableOfPerFile = 1;
    maxThreads = numOfTable;
  } else {
    tableOfPerFile = numOfTable / arguments->thread_num;
    if (0 != numOfTable % arguments->thread_num) {
      tableOfPerFile += 1;
    }
  }

  char* tblBuf = (char*)calloc(1, tableOfPerFile * sizeof(STableRecord));
  if (NULL == tblBuf){
    fprintf(stderr, "failed to calloc %" PRIzu "\n", tableOfPerFile * sizeof(STableRecord));
    close(fd);
    fd = -1;
    return -1;
  }

  int32_t numOfThread = *totalNumOfThread;
  int subFd = -1;
  for (; numOfThread < maxThreads; numOfThread++) {
    memset(tmpBuf, 0, TSDB_FILENAME_LEN);
    sprintf(tmpBuf, ".tables.tmp.%d", numOfThread);
    subFd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
    if (subFd == -1) {
      fprintf(stderr, "failed to open temp file: %s\n", tmpBuf);
      for (int32_t loopCnt = 0; loopCnt < numOfThread; loopCnt++) {
        sprintf(tmpBuf, ".tables.tmp.%d", loopCnt);
        (void)remove(tmpBuf);
      }
      sprintf(tmpBuf, ".select-tbname.tmp");
      (void)remove(tmpBuf);
      close(fd);
      return -1;
    }

    // read tableOfPerFile for fd, write to subFd
    ssize_t readLen = read(fd, tblBuf, tableOfPerFile * sizeof(STableRecord));
    if (readLen <= 0) {
      close(subFd);
      break;
    }
    taosWrite(subFd, tblBuf, readLen);
    close(subFd);
  }

  sprintf(tmpBuf, ".select-tbname.tmp");
  (void)remove(tmpBuf);

  if (fd >= 0) {
    close(fd);
    fd = -1;
  }
  taos_free_result(result);

  *totalNumOfThread = numOfThread;

  free(tmpCommand);

  return 0;
}
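The restructured function above first spills every table record into one temporary file and then splits that file into per-thread chunks of tableOfPerFile records each. A minimal sketch of that split arithmetic, with invented names and an assumed 10-table / 4-thread workload, is shown below.

/* illustrative sketch only, not the repository's implementation */
#include <stdio.h>

int main(void) {
  int numOfTable = 10, threads = 4;

  int maxThreads = threads;
  int records_per_file;
  if (numOfTable <= threads) {
    records_per_file = 1;                 /* one record per worker file */
    maxThreads = numOfTable;              /* fewer workers than threads */
  } else {
    records_per_file = numOfTable / threads;
    if (numOfTable % threads != 0) records_per_file += 1;   /* ceil division */
  }

  /* 10 tables over 4 threads -> 3 records per .tables.tmp.<n> file */
  printf("%d records per file, up to %d worker files\n", records_per_file, maxThreads);
  return 0;
}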
...
...
@@ -946,7 +985,7 @@ int taosDumpOut(struct arguments *arguments) {
  }

  // start multi threads to dumpout
  taosStartDumpOutWorkThreads(arguments, totalNumOfThread, dbInfos[0]->name);
  taosStartDumpOutWorkThreads(taos, arguments, totalNumOfThread, dbInfos[0]->name);

  char tmpFileName[TSDB_FILENAME_LEN + 1];
_clean_tmp_file:
...
...
@@ -1181,34 +1220,34 @@ void* taosDumpOutWorkThreadFp(void *arg)
  STableRecord tableRecord;
  int fd;

  char tmpFileName[TSDB_FILENAME_LEN * 4] = {0};
  sprintf(tmpFileName, ".tables.tmp.%d", pThread->threadIndex);
  fd = open(tmpFileName, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
  char tmpBuf[TSDB_FILENAME_LEN * 4] = {0};
  sprintf(tmpBuf, ".tables.tmp.%d", pThread->threadIndex);
  fd = open(tmpBuf, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
  if (fd == -1) {
    fprintf(stderr, "taosDumpTableFp() failed to open temp file: %s\n", tmpFileName);
    fprintf(stderr, "taosDumpTableFp() failed to open temp file: %s\n", tmpBuf);
    return NULL;
  }

  FILE *fp = NULL;
  memset(tmpFileName, 0, TSDB_FILENAME_LEN + 128);
  memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128);

  if (tsArguments.outpath[0] != 0) {
    sprintf(tmpFileName, "%s/%s.tables.%d.sql", tsArguments.outpath, pThread->dbName, pThread->threadIndex);
    sprintf(tmpBuf, "%s/%s.tables.%d.sql", tsArguments.outpath, pThread->dbName, pThread->threadIndex);
  } else {
    sprintf(tmpFileName, "%s.tables.%d.sql", pThread->dbName, pThread->threadIndex);
    sprintf(tmpBuf, "%s.tables.%d.sql", pThread->dbName, pThread->threadIndex);
  }

  fp = fopen(tmpFileName, "w");
  fp = fopen(tmpBuf, "w");
  if (fp == NULL) {
    fprintf(stderr, "failed to open file %s\n", tmpFileName);
    fprintf(stderr, "failed to open file %s\n", tmpBuf);
    close(fd);
    return NULL;
  }

  memset(tmpFileName, 0, TSDB_FILENAME_LEN);
  sprintf(tmpFileName, "use %s", pThread->dbName);
  memset(tmpBuf, 0, TSDB_FILENAME_LEN);
  sprintf(tmpBuf, "use %s", pThread->dbName);

  TAOS_RES* tmpResult = taos_query(pThread->taosCon, tmpFileName);
  TAOS_RES* tmpResult = taos_query(pThread->taosCon, tmpBuf);
  int32_t code = taos_errno(tmpResult);
  if (code != 0) {
    fprintf(stderr, "invalid database %s\n", pThread->dbName);
...
...
@@ -1218,6 +1257,9 @@ void* taosDumpOutWorkThreadFp(void *arg)
    return NULL;
  }

  int fileNameIndex = 1;
  int tablesInOneFile = 0;
  int64_t lastRowsPrint = 5000000;
  fprintf(fp, "USE %s;\n\n", pThread->dbName);
  while (1) {
    ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord));
...
...
@@ -1228,6 +1270,33 @@ void* taosDumpOutWorkThreadFp(void *arg)
    // TODO: sum table count and table rows by self
    pThread->tablesOfDumpOut++;
    pThread->rowsOfDumpOut += ret;

    if (pThread->rowsOfDumpOut >= lastRowsPrint) {
      printf(" %" PRId64 " rows already be dumpout from database %s\n", pThread->rowsOfDumpOut, pThread->dbName);
      lastRowsPrint += 5000000;
    }

    tablesInOneFile++;
    if (tablesInOneFile >= tsArguments.table_batch) {
      fclose(fp);
      tablesInOneFile = 0;

      memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128);
      if (tsArguments.outpath[0] != 0) {
        sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql", tsArguments.outpath, pThread->dbName, pThread->threadIndex, fileNameIndex);
      } else {
        sprintf(tmpBuf, "%s.tables.%d-%d.sql", pThread->dbName, pThread->threadIndex, fileNameIndex);
      }
      fileNameIndex++;

      fp = fopen(tmpBuf, "w");
      if (fp == NULL) {
        fprintf(stderr, "failed to open file %s\n", tmpBuf);
        close(fd);
        taos_free_result(tmpResult);
        return NULL;
      }
    }
  }
...
...
@@ -1238,7 +1307,7 @@ void* taosDumpOutWorkThreadFp(void *arg)
  return NULL;
}

static void taosStartDumpOutWorkThreads(struct arguments* args, int32_t numOfThread, char *dbName)
static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, int32_t numOfThread, char *dbName)
{
  pthread_attr_t thattr;
  SThreadParaObj *threadObj = (SThreadParaObj *)calloc(numOfThread, sizeof(SThreadParaObj));
...
...
@@ -1249,12 +1318,7 @@ static void taosStartDumpOutWorkThreads(struct arguments* args, int32_t numOfTh
    pThread->threadIndex = t;
    pThread->totalThreads = numOfThread;
    tstrncpy(pThread->dbName, dbName, TSDB_TABLE_NAME_LEN);
    pThread->taosCon = taos_connect(args->host, args->user, args->password, NULL, args->port);
    if (pThread->taosCon == NULL) {
      fprintf(stderr, "ERROR: thread:%d failed connect to TDengine, reason:%s\n", pThread->threadIndex, taos_errstr(NULL));
      exit(0);
    }
    pThread->taosCon = taosCon;

    pthread_attr_init(&thattr);
    pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
...
...
@@ -1273,7 +1337,6 @@ static void taosStartDumpOutWorkThreads(struct arguments* args, int32_t numOfTh
  int64_t totalRowsOfDumpOut = 0;
  int64_t totalChildTblsOfDumpOut = 0;
  for (int32_t t = 0; t < numOfThread; ++t) {
    taos_close(threadObj[t].taosCon);
    totalChildTblsOfDumpOut += threadObj[t].tablesOfDumpOut;
    totalRowsOfDumpOut += threadObj[t].rowsOfDumpOut;
  }
...
...
@@ -1398,29 +1461,20 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
    return -1;
  }

  TAOS_FIELD *fields = taos_fetch_fields(res);
  int32_t numOfTable  = 0;
  int32_t numOfThread = 0;
  char tmpFileName[TSDB_FILENAME_LEN + 1];
  while ((row = taos_fetch_row(res)) != NULL) {
    if (0 == numOfTable) {
      memset(tmpFileName, 0, TSDB_FILENAME_LEN);
      sprintf(tmpFileName, ".tables.tmp.%d", numOfThread);
      fd = open(tmpFileName, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
  char tmpBuf[TSDB_FILENAME_LEN + 1];
  memset(tmpBuf, 0, TSDB_FILENAME_LEN);
  sprintf(tmpBuf, ".show-tables.tmp");
  fd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
  if (fd == -1) {
    fprintf(stderr, "failed to open temp file: %s\n", tmpFileName);
    fprintf(stderr, "failed to open temp file: %s\n", tmpBuf);
    taos_free_result(res);
    for (int32_t loopCnt = 0; loopCnt < numOfThread; loopCnt++) {
      sprintf(tmpFileName, ".tables.tmp.%d", loopCnt);
      (void)remove(tmpFileName);
    }
    return -1;
  }
  numOfThread++;
  }

  TAOS_FIELD *fields = taos_fetch_fields(res);
  int32_t numOfTable = 0;
  while ((row = taos_fetch_row(res)) != NULL) {
    memset(&tableRecord, 0, sizeof(STableRecord));
    tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
    tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes);
...
...
@@ -1428,13 +1482,59 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
    taosWrite(fd, &tableRecord, sizeof(STableRecord));
    numOfTable++;
  }
  taos_free_result(res);
  lseek(fd, 0, SEEK_SET);

  if (numOfTable >= arguments->table_batch) {
    numOfTable = 0;
  int maxThreads = tsArguments.thread_num;
  int tableOfPerFile;
  if (numOfTable <= tsArguments.thread_num) {
    tableOfPerFile = 1;
    maxThreads = numOfTable;
  } else {
    tableOfPerFile = numOfTable / tsArguments.thread_num;
    if (0 != numOfTable % tsArguments.thread_num) {
      tableOfPerFile += 1;
    }
  }

  char* tblBuf = (char*)calloc(1, tableOfPerFile * sizeof(STableRecord));
  if (NULL == tblBuf){
    fprintf(stderr, "failed to calloc %" PRIzu "\n", tableOfPerFile * sizeof(STableRecord));
    close(fd);
    fd = -1;
    return -1;
  }

  int32_t numOfThread = 0;
  int subFd = -1;
  for (numOfThread = 0; numOfThread < maxThreads; numOfThread++) {
    memset(tmpBuf, 0, TSDB_FILENAME_LEN);
    sprintf(tmpBuf, ".tables.tmp.%d", numOfThread);
    subFd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
    if (subFd == -1) {
      fprintf(stderr, "failed to open temp file: %s\n", tmpBuf);
      for (int32_t loopCnt = 0; loopCnt < numOfThread; loopCnt++) {
        sprintf(tmpBuf, ".tables.tmp.%d", loopCnt);
        (void)remove(tmpBuf);
      }
      sprintf(tmpBuf, ".show-tables.tmp");
      (void)remove(tmpBuf);
      close(fd);
      return -1;
    }

    // read tableOfPerFile for fd, write to subFd
    ssize_t readLen = read(fd, tblBuf, tableOfPerFile * sizeof(STableRecord));
    if (readLen <= 0) {
      close(subFd);
      break;
    }
    taosWrite(subFd, tblBuf, readLen);
    close(subFd);
  }

  sprintf(tmpBuf, ".show-tables.tmp");
  (void)remove(tmpBuf);

  if (fd >= 0) {
    close(fd);
...
...
@@ -1444,10 +1544,10 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
  taos_free_result(res);

  // start multi threads to dumpout
  taosStartDumpOutWorkThreads(arguments, numOfThread, dbInfo->name);
  taosStartDumpOutWorkThreads(taosCon, arguments, numOfThread, dbInfo->name);

  for (int loopCnt = 0; loopCnt < numOfThread; loopCnt++) {
    sprintf(tmpFileName, ".tables.tmp.%d", loopCnt);
    (void)remove(tmpFileName);
    sprintf(tmpBuf, ".tables.tmp.%d", loopCnt);
    (void)remove(tmpBuf);
  }

  return 0;
...
...
@@ -1552,7 +1652,7 @@ void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols
}

int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon, char* dbName) {
  /* char temp[MAX_COMMAND_SIZE] = "\0"; */
  int64_t lastRowsPrint = 5000000;
  int64_t totalRows = 0;
  int count = 0;
  char *pstr = NULL;
...
...
@@ -1684,6 +1784,11 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
    count++;
    fprintf(fp, "%s", tmpBuffer);

    if (totalRows >= lastRowsPrint) {
      printf(" %" PRId64 " rows already be dumpout from %s.%s\n", totalRows, dbName, tbname);
      lastRowsPrint += 5000000;
    }

    total_sqlstr_len += curr_sqlstr_len;

    if ((count >= arguments->data_batch) || (sql_buf_len - total_sqlstr_len < TSDB_MAX_BYTES_PER_ROW)) {
...
...
@@ -2048,6 +2153,7 @@ int taosDumpInOneFile(TAOS * taos, FILE* fp, char* fcharset, char* encode, c
    return -1;
  }

  int lastRowsPrint = 5000000;
  int lineNo = 0;
  while ((read_len = getline(&line, &line_len, fp)) != -1) {
    ++lineNo;
...
...
@@ -2075,6 +2181,11 @@ int taosDumpInOneFile(TAOS * taos, FILE* fp, char* fcharset, char* encode, c
      memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN);
      cmd_len = 0;

      if (lineNo >= lastRowsPrint) {
        printf(" %d lines already be executed from file %s\n", lineNo, fileName);
        lastRowsPrint += 5000000;
      }
  }

  tfree(cmd);
...
...
@@ -2101,7 +2212,7 @@ void* taosDumpInWorkThreadFp(void *arg)
  return NULL;
}

static void taosStartDumpInWorkThreads(struct arguments *args)
static void taosStartDumpInWorkThreads(void* taosCon, struct arguments *args)
{
  pthread_attr_t thattr;
  SThreadParaObj *pThread;
...
...
@@ -2116,11 +2227,7 @@ static void taosStartDumpInWorkThreads(struct arguments *args)
    pThread = threadObj + t;
    pThread->threadIndex = t;
    pThread->totalThreads = totalThreads;
    pThread->taosCon = taos_connect(args->host, args->user, args->password, NULL, args->port);
    if (pThread->taosCon == NULL) {
      fprintf(stderr, "ERROR: thread:%d failed connect to TDengine, reason:%s\n", pThread->threadIndex, taos_errstr(NULL));
      exit(0);
    }
    pThread->taosCon = taosCon;

    pthread_attr_init(&thattr);
    pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
...
...
@@ -2169,7 +2276,7 @@ int taosDumpIn(struct arguments *arguments) {
    taosDumpInOneFile(taos, fp, tsfCharset, arguments->encode, tsDbSqlFile);
  }

  taosStartDumpInWorkThreads(arguments);
  taosStartDumpInWorkThreads(taos, arguments);

  taos_close(taos);
  taosFreeSQLFiles();
...
...
src/mnode/inc/mnodeMnode.h
View file @ cf4f0d95
...
...
@@ -43,8 +43,8 @@ void mnodeIncMnodeRef(struct SMnodeObj *pMnode);
void   mnodeDecMnodeRef(struct SMnodeObj *pMnode);
char*  mnodeGetMnodeRoleStr();
void   mnodeGetMnodeEpSetForPeer(SRpcEpSet *epSet);
void   mnodeGetMnodeEpSetForShell(SRpcEpSet *epSet);
void   mnodeGetMnodeEpSetForPeer(SRpcEpSet *epSet, bool redirect);
void   mnodeGetMnodeEpSetForShell(SRpcEpSet *epSet, bool redirect);
char*  mnodeGetMnodeMasterEp();
void   mnodeGetMnodeInfos(void *mnodes);
...
...
src/mnode/src/mnodeDnode.c
View file @ cf4f0d95
...
...
@@ -303,7 +303,7 @@ void mnodeUpdateDnode(SDnodeObj *pDnode) {
  int32_t code = sdbUpdateRow(&row);
  if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) {
    mError("dnodeId:%d, failed update", pDnode->dnodeId);
    mError("dnode:%d, failed update", pDnode->dnodeId);
  }
}
...
...
src/mnode/src/mnodeMnode.c
View file @ cf4f0d95
...
...
@@ -273,14 +273,14 @@ void mnodeUpdateMnodeEpSet(SMInfos *pMinfos) {
  mnodeMnodeUnLock();
}

void mnodeGetMnodeEpSetForPeer(SRpcEpSet *epSet) {
void mnodeGetMnodeEpSetForPeer(SRpcEpSet *epSet, bool redirect) {
  mnodeMnodeRdLock();
  *epSet = tsMEpForPeer;
  mnodeMnodeUnLock();

  mTrace("vgId:1, mnodes epSet for peer is returned, num:%d inUse:%d", tsMEpForPeer.numOfEps, tsMEpForPeer.inUse);
  for (int32_t i = 0; i < epSet->numOfEps; ++i) {
    if (strcmp(epSet->fqdn[i], tsLocalFqdn) == 0 && htons(epSet->port[i]) == tsServerPort + TSDB_PORT_DNODEDNODE) {
    if (redirect && strcmp(epSet->fqdn[i], tsLocalFqdn) == 0 && htons(epSet->port[i]) == tsServerPort + TSDB_PORT_DNODEDNODE) {
      epSet->inUse = (i + 1) % epSet->numOfEps;
      mTrace("vgId:1, mnode:%d, for peer ep:%s:%u, set inUse to %d", i, epSet->fqdn[i], htons(epSet->port[i]), epSet->inUse);
    } else {
...
...
@@ -289,14 +289,14 @@ void mnodeGetMnodeEpSetForPeer(SRpcEpSet *epSet) {
  }
}

void mnodeGetMnodeEpSetForShell(SRpcEpSet *epSet) {
void mnodeGetMnodeEpSetForShell(SRpcEpSet *epSet, bool redirect) {
  mnodeMnodeRdLock();
  *epSet = tsMEpForShell;
  mnodeMnodeUnLock();

  mTrace("vgId:1, mnodes epSet for shell is returned, num:%d inUse:%d", tsMEpForShell.numOfEps, tsMEpForShell.inUse);
  for (int32_t i = 0; i < epSet->numOfEps; ++i) {
    if (strcmp(epSet->fqdn[i], tsLocalFqdn) == 0 && htons(epSet->port[i]) == tsServerPort) {
    if (redirect && strcmp(epSet->fqdn[i], tsLocalFqdn) == 0 && htons(epSet->port[i]) == tsServerPort) {
      epSet->inUse = (i + 1) % epSet->numOfEps;
      mTrace("vgId:1, mnode:%d, for shell ep:%s:%u, set inUse to %d", i, epSet->fqdn[i], htons(epSet->port[i]), epSet->inUse);
    } else {
...
...
src/mnode/src/mnodePeer.c
View file @ cf4f0d95
...
...
@@ -54,7 +54,7 @@ int32_t mnodeProcessPeerReq(SMnodeMsg *pMsg) {
  if (!sdbIsMaster()) {
    SMnodeRsp *rpcRsp = &pMsg->rpcRsp;
    SRpcEpSet *epSet = rpcMallocCont(sizeof(SRpcEpSet));
    mnodeGetMnodeEpSetForPeer(epSet);
    mnodeGetMnodeEpSetForPeer(epSet, true);
    rpcRsp->rsp = epSet;
    rpcRsp->len = sizeof(SRpcEpSet);
...
...
src/mnode/src/mnodeProfile.c
View file @ cf4f0d95
...
...
@@ -282,27 +282,34 @@ static int32_t mnodeRetrieveConns(SShowObj *pShow, char *data, int32_t rows, voi
// not thread safe, need optimized
int32_t mnodeSaveQueryStreamList(SConnObj *pConn, SHeartBeatMsg *pHBMsg) {
  pConn->numOfQueries = htonl(pHBMsg->numOfQueries);
  if (pConn->numOfQueries > 0) {
  pConn->numOfQueries = 0;
  pConn->numOfStreams = 0;
  int32_t numOfQueries = htonl(pHBMsg->numOfQueries);
  int32_t numOfStreams = htonl(pHBMsg->numOfStreams);

  if (numOfQueries > 0) {
    if (pConn->pQueries == NULL) {
      pConn->pQueries = calloc(sizeof(SQueryDesc), QUERY_STREAM_SAVE_SIZE);
    }

    int32_t saveSize = MIN(QUERY_STREAM_SAVE_SIZE, pConn->numOfQueries) * sizeof(SQueryDesc);
    pConn->numOfQueries = MIN(QUERY_STREAM_SAVE_SIZE, numOfQueries);
    int32_t saveSize = pConn->numOfQueries * sizeof(SQueryDesc);
    if (saveSize > 0 && pConn->pQueries != NULL) {
      memcpy(pConn->pQueries, pHBMsg->pData, saveSize);
    }
  }

  pConn->numOfStreams = htonl(pHBMsg->numOfStreams);
  if (pConn->numOfStreams > 0) {
  if (numOfStreams > 0) {
    if (pConn->pStreams == NULL) {
      pConn->pStreams = calloc(sizeof(SStreamDesc), QUERY_STREAM_SAVE_SIZE);
    }

    int32_t saveSize = MIN(QUERY_STREAM_SAVE_SIZE, pConn->numOfStreams) * sizeof(SStreamDesc);
    pConn->numOfStreams = MIN(QUERY_STREAM_SAVE_SIZE, numOfStreams);
    int32_t saveSize = pConn->numOfStreams * sizeof(SStreamDesc);
    if (saveSize > 0 && pConn->pStreams != NULL) {
      memcpy(pConn->pStreams, pHBMsg->pData + pConn->numOfQueries * sizeof(SQueryDesc), saveSize);
      memcpy(pConn->pStreams, pHBMsg->pData + numOfQueries * sizeof(SQueryDesc), saveSize);
    }
  }
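The change above clamps the query and stream counts reported in a heartbeat with MIN before sizing the memcpy, so an oversized count cannot overrun the fixed-size save buffers. A tiny standalone sketch of that bounded-copy pattern, with invented names and sizes, follows.

/* illustrative sketch only, not the repository's implementation */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SAVE_SIZE 20
#define MIN(a, b) ((a) < (b) ? (a) : (b))

typedef struct { char name[16]; } Desc;

int main(void) {
  Desc saved[SAVE_SIZE];
  Desc incoming[100];                        /* pretend this came off the wire */
  memset(incoming, 0, sizeof(incoming));

  int32_t reported = 100;                    /* untrusted count from the peer  */
  int32_t kept = MIN(SAVE_SIZE, reported);   /* clamp before sizing the copy   */
  memcpy(saved, incoming, kept * sizeof(Desc));

  printf("kept %d of %d descriptors\n", kept, reported);
  return 0;
}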
...
...
src/mnode/src/mnodeRead.c
View file @ cf4f0d95
...
...
@@ -50,7 +50,7 @@ int32_t mnodeProcessRead(SMnodeMsg *pMsg) {
  if (!sdbIsMaster()) {
    SMnodeRsp *rpcRsp = &pMsg->rpcRsp;
    SRpcEpSet *epSet = rpcMallocCont(sizeof(SRpcEpSet));
    mnodeGetMnodeEpSetForShell(epSet);
    mnodeGetMnodeEpSetForShell(epSet, true);
    rpcRsp->rsp = epSet;
    rpcRsp->len = sizeof(SRpcEpSet);
...
...
src/mnode/src/mnodeShow.c
View file @ cf4f0d95
...
...
@@ -282,7 +282,7 @@ static int32_t mnodeProcessHeartBeatMsg(SMnodeMsg *pMsg) {
  pRsp->onlineDnodes = htonl(mnodeGetOnlineDnodesNum());
  pRsp->totalDnodes = htonl(mnodeGetDnodesNum());
  mnodeGetMnodeEpSetForShell(&pRsp->epSet);
  mnodeGetMnodeEpSetForShell(&pRsp->epSet, false);

  pMsg->rpcRsp.rsp = pRsp;
  pMsg->rpcRsp.len = sizeof(SHeartBeatRsp);
...
...
@@ -349,7 +349,7 @@ static int32_t mnodeProcessConnectMsg(SMnodeMsg *pMsg) {
  pConnectRsp->writeAuth = pUser->writeAuth;
  pConnectRsp->superAuth = pUser->superAuth;
  mnodeGetMnodeEpSetForShell(&pConnectRsp->epSet);
  mnodeGetMnodeEpSetForShell(&pConnectRsp->epSet, false);

connect_over:
  if (code != TSDB_CODE_SUCCESS) {
...
...
src/mnode/src/mnodeVgroup.c
View file @ cf4f0d95
...
...
@@ -315,7 +315,8 @@ void mnodeUpdateVgroupStatus(SVgObj *pVgroup, SDnodeObj *pDnode, SVnodeLoad *pVl
  for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
    SVnodeGid *pVgid = &pVgroup->vnodeGid[i];
    if (pVgid->pDnode == pDnode) {
      mTrace("dnode:%d, receive status from dnode, vgId:%d status is %d:%s", pDnode->dnodeId, pVgroup->vgId, pVgid->role, syncRole[pVgid->role]);
      mTrace("dnode:%d, receive status from dnode, vgId:%d status:%s last:%s", pDnode->dnodeId, pVgroup->vgId, syncRole[pVload->role], syncRole[pVgid->role]);
      pVgid->role = pVload->role;
      if (pVload->role == TAOS_SYNC_ROLE_MASTER) {
        pVgroup->inUse = i;
...
...
src/mnode/src/mnodeWrite.c
View file @ cf4f0d95
...
...
@@ -50,7 +50,7 @@ int32_t mnodeProcessWrite(SMnodeMsg *pMsg) {
  if (!sdbIsMaster()) {
    SMnodeRsp *rpcRsp = &pMsg->rpcRsp;
    SRpcEpSet *epSet = rpcMallocCont(sizeof(SRpcEpSet));
    mnodeGetMnodeEpSetForShell(epSet);
    mnodeGetMnodeEpSetForShell(epSet, true);
    rpcRsp->rsp = epSet;
    rpcRsp->len = sizeof(SRpcEpSet);
...
...
src/query/inc/tsqlfunction.h
View file @ cf4f0d95
...
...
@@ -250,10 +250,9 @@ enum {
};

typedef struct STwaInfo {
  TSKEY       lastKey;
  int8_t      hasResult;  // flag to denote has value
  double      dOutput;
  double      lastValue;
  SPoint1     p;
  STimeWindow win;
} STwaInfo;
...
...
src/query/src/qExecutor.c
View file @ cf4f0d95
...
...
@@ -703,39 +703,11 @@ static FORCE_INLINE int32_t getForwardStepsInBlock(int32_t numOfRows, __block_se
  return forwardStep;
}

/**
 * NOTE: the query status only set for the first scan of master scan.
 */
static int32_t doCheckQueryCompleted(SQueryRuntimeEnv *pRuntimeEnv, TSKEY lastKey, SResultRowInfo *pWindowResInfo) {
  SQuery *pQuery = pRuntimeEnv->pQuery;
  if (pRuntimeEnv->scanFlag != MASTER_SCAN) {
    return pWindowResInfo->size;
  }

  // for group by normal column query, close time window and return.
  if (!QUERY_IS_INTERVAL_QUERY(pQuery)) {
    closeAllTimeWindow(pWindowResInfo);
    return pWindowResInfo->size;
  }

  // no qualified results exist, abort check
  int32_t numOfClosed = 0;
  if (pWindowResInfo->size == 0) {
    return pWindowResInfo->size;
  }

  // query completed
  if ((lastKey >= pQuery->current->win.ekey && QUERY_IS_ASC_QUERY(pQuery)) ||
      (lastKey <= pQuery->current->win.ekey && !QUERY_IS_ASC_QUERY(pQuery))) {
    closeAllTimeWindow(pWindowResInfo);
    pWindowResInfo->curIndex = pWindowResInfo->size - 1;
    setQueryStatus(pQuery, QUERY_COMPLETED | QUERY_RESBUF_FULL);
  } else {  // set the current index to be the last unclosed window
static int32_t updateResultRowCurrentIndex(SResultRowInfo *pWindowResInfo, TSKEY lastKey, bool ascQuery) {
  int32_t i = 0;
  int64_t skey = TSKEY_INITIAL_VAL;

  int32_t numOfClosed = 0;
  for (i = 0; i < pWindowResInfo->size; ++i) {
    SResultRow *pResult = pWindowResInfo->pResult[i];
    if (pResult->closed) {
...
@@ -744,8 +716,7 @@ static int32_t doCheckQueryCompleted(SQueryRuntimeEnv *pRuntimeEnv, TSKEY lastKe
    }

    TSKEY ekey = pResult->win.ekey;
    if ((ekey <= lastKey && QUERY_IS_ASC_QUERY(pQuery)) ||
        (pResult->win.skey >= lastKey && !QUERY_IS_ASC_QUERY(pQuery))) {
    if ((ekey <= lastKey && ascQuery) || (pResult->win.skey >= lastKey && !ascQuery)) {
      closeTimeWindow(pWindowResInfo, i);
    } else {
      skey = pResult->win.skey;
...
...
@@ -759,9 +730,33 @@ static int32_t doCheckQueryCompleted(SQueryRuntimeEnv *pRuntimeEnv, TSKEY lastKe
    pWindowResInfo->curIndex = pWindowResInfo->size - 1;
  } else {
    pWindowResInfo->curIndex = i;
    pWindowResInfo->prevSKey = pWindowResInfo->pResult[pWindowResInfo->curIndex]->win.skey;
  }

  pWindowResInfo->prevSKey = pWindowResInfo->pResult[pWindowResInfo->curIndex]->win.skey;
  return numOfClosed;
}

/**
 * NOTE: the query status only set for the first scan of master scan.
 */
static int32_t doCheckQueryCompleted(SQueryRuntimeEnv *pRuntimeEnv, TSKEY lastKey, SResultRowInfo *pWindowResInfo) {
  SQuery *pQuery = pRuntimeEnv->pQuery;
  if (pRuntimeEnv->scanFlag != MASTER_SCAN || pWindowResInfo->size == 0) {
    return pWindowResInfo->size;
  }

  // no qualified results exist, abort check
  int32_t numOfClosed = 0;
  bool ascQuery = QUERY_IS_ASC_QUERY(pQuery);

  // query completed
  if ((lastKey >= pQuery->current->win.ekey && ascQuery) || (lastKey <= pQuery->current->win.ekey && (!ascQuery))) {
    closeAllTimeWindow(pWindowResInfo);
    pWindowResInfo->curIndex = pWindowResInfo->size - 1;
    setQueryStatus(pQuery, QUERY_COMPLETED | QUERY_RESBUF_FULL);
  } else {  // set the current index to be the last unclosed window
    numOfClosed = updateResultRowCurrentIndex(pWindowResInfo, lastKey, ascQuery);
  }

  // the number of completed slots are larger than the threshold, return current generated results to client.
  if (numOfClosed > pQuery->rec.threshold) {
...
...
@@ -1050,24 +1045,6 @@ static void setNotInterpoWindowKey(SQLFunctionCtx* pCtx, int32_t numOfOutput, in
  }
}

//static double getTSWindowInterpoVal(SColumnInfoData* pColInfo, int16_t srcColIndex, int16_t rowIndex, TSKEY key, char** prevRow, TSKEY* tsCols, int32_t step) {
//  TSKEY start = tsCols[rowIndex];
//  TSKEY prevTs = (rowIndex == 0)? *(TSKEY *) prevRow[0] : tsCols[rowIndex - step];
//
//  double v1 = 0, v2 = 0, v = 0;
//  char *prevVal = (rowIndex == 0)? prevRow[srcColIndex] : ((char*)pColInfo->pData) + (rowIndex - step) * pColInfo->info.bytes;
//
//  GET_TYPED_DATA(v1, double, pColInfo->info.type, (char *)prevVal);
//  GET_TYPED_DATA(v2, double, pColInfo->info.type, (char *)pColInfo->pData + rowIndex * pColInfo->info.bytes);
//
//  SPoint point1 = (SPoint){.key = prevTs, .val = &v1};
//  SPoint point2 = (SPoint){.key = start, .val = &v2};
//  SPoint point = (SPoint){.key = key, .val = &v};
//  taosGetLinearInterpolationVal(TSDB_DATA_TYPE_DOUBLE, &point1, &point2, &point);
//
//  return v;
//}

// window start key interpolation
static bool setTimeWindowInterpolationStartTs(SQueryRuntimeEnv* pRuntimeEnv, int32_t pos, int32_t numOfRows, SArray* pDataBlock, TSKEY* tsCols, STimeWindow* win) {
  SQuery* pQuery = pRuntimeEnv->pQuery;
...
...
@@ -1238,6 +1215,8 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *
      if (interp) {
        setResultRowInterpo(pResult, RESULT_ROW_START_INTERP);
      }
    } else {
      setNotInterpoWindowKey(pRuntimeEnv->pCtx, pQuery->numOfOutput, RESULT_ROW_START_INTERP);
    }

    done = resultRowInterpolated(pResult, RESULT_ROW_END_INTERP);
...
...
@@ -1249,6 +1228,8 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *
      if (interp) {
        setResultRowInterpo(pResult, RESULT_ROW_END_INTERP);
      }
    } else {
      setNotInterpoWindowKey(pRuntimeEnv->pCtx, pQuery->numOfOutput, RESULT_ROW_END_INTERP);
    }
  }
...
...
@@ -1289,6 +1270,8 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *
      if (interp) {
        setResultRowInterpo(pResult, RESULT_ROW_START_INTERP);
      }
    } else {
      setNotInterpoWindowKey(pRuntimeEnv->pCtx, pQuery->numOfOutput, RESULT_ROW_START_INTERP);
    }

    done = resultRowInterpolated(pResult, RESULT_ROW_END_INTERP);
...
...
@@ -1299,6 +1282,8 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *
      if (interp) {
        setResultRowInterpo(pResult, RESULT_ROW_END_INTERP);
      }
    } else {
      setNotInterpoWindowKey(pRuntimeEnv->pCtx, pQuery->numOfOutput, RESULT_ROW_END_INTERP);
    }
  }
...
...
@@ -1802,9 +1787,12 @@ static int32_t tableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBl
  // interval query with limit applied
  int32_t numOfRes = 0;
  if (QUERY_IS_INTERVAL_QUERY(pQuery) || pRuntimeEnv->groupbyNormalCol) {
  if (QUERY_IS_INTERVAL_QUERY(pQuery)) {
    numOfRes = doCheckQueryCompleted(pRuntimeEnv, lastKey, pWindowResInfo);
  } else {
  } else if (pRuntimeEnv->groupbyNormalCol) {
    closeAllTimeWindow(pWindowResInfo);
    numOfRes = pWindowResInfo->size;
  } else { // projection query
    numOfRes = (int32_t)getNumOfResult(pRuntimeEnv);

    // update the number of output result
...
...
@@ -4487,6 +4475,18 @@ static void stableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBloc
  } else {
    blockwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, searchFn, pDataBlock);
  }

  if (QUERY_IS_INTERVAL_QUERY(pQuery)) {
    bool ascQuery = QUERY_IS_ASC_QUERY(pQuery);

    // TODO refactor
    if ((pTableQueryInfo->lastKey >= pTableQueryInfo->win.ekey && ascQuery) ||
        (pTableQueryInfo->lastKey <= pTableQueryInfo->win.ekey && (!ascQuery))) {
      closeAllTimeWindow(pWindowResInfo);
      pWindowResInfo->curIndex = pWindowResInfo->size - 1;
    } else {
      updateResultRowCurrentIndex(pWindowResInfo, pTableQueryInfo->lastKey, ascQuery);
    }
  }
}

bool queryHasRemainResForTableQuery(SQueryRuntimeEnv* pRuntimeEnv) {
...
...
@@ -7635,7 +7635,7 @@ int32_t qRetrieveQueryResultInfo(qinfo_t qinfo, bool* buildRes, void* pRspContex
  int32_t code = TSDB_CODE_SUCCESS;

  if (tsHalfCoresForQuery) {
  if (tsRetrieveBlockingModel) {
    pQInfo->rspContext = pRspContext;
    tsem_wait(&pQInfo->ready);
    *buildRes = true;
...
...
src/query/src/qFilterfunc.c
View file @ cf4f0d95
...
...
@@ -21,6 +21,12 @@
#include "tcompare.h"
#include "tsqlfunction.h"

#define FLT_EQUAL(_x, _y)        (fabs((_x) - (_y)) <= (4 * FLT_EPSILON))
#define FLT_GREATER(_x, _y)      (!FLT_EQUAL((_x), (_y)) && ((_x) > (_y)))
#define FLT_LESS(_x, _y)         (!FLT_EQUAL((_x), (_y)) && ((_x) < (_y)))
#define FLT_GREATEREQUAL(_x, _y) (FLT_EQUAL((_x), (_y)) || ((_x) > (_y)))
#define FLT_LESSEQUAL(_x, _y)    (FLT_EQUAL((_x), (_y)) || ((_x) < (_y)))

bool less_i8(SColumnFilterElem *pFilter, char *minval, char *maxval) {
  return (*(int8_t *)minval < pFilter->filterInfo.upperBndi);
}
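The FLT_* helpers above compare floats with a small tolerance instead of using == or raw <, since a value that went through text parsing rarely matches the stored value bit-for-bit. The standalone sketch below, written only for illustration (the main() driver and the accumulated-sum example are not from the repository), shows why the tolerance changes the outcome.

/* illustrative sketch only, not the repository's implementation */
#include <float.h>
#include <math.h>
#include <stdio.h>

#define FLT_EQUAL(_x, _y)   (fabs((_x) - (_y)) <= (4 * FLT_EPSILON))
#define FLT_GREATER(_x, _y) (!FLT_EQUAL((_x), (_y)) && ((_x) > (_y)))

int main(void) {
  float sum = 0.0f;
  for (int i = 0; i < 10; i++) sum += 0.1f;   /* accumulates rounding error */

  printf("sum == 1.0f        -> %d\n", sum == 1.0f);            /* typically 0 */
  printf("FLT_EQUAL(sum, 1)  -> %d\n", FLT_EQUAL(sum, 1.0f));   /* 1: close enough */
  printf("FLT_GREATER(sum, 1)-> %d\n", FLT_GREATER(sum, 1.0f)); /* 0: treated as equal */
  return 0;
}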
...
...
@@ -38,35 +44,35 @@ bool less_i64(SColumnFilterElem *pFilter, char *minval, char *maxval) {
}

bool less_ds(SColumnFilterElem *pFilter, char *minval, char *maxval) {
  return (*(float *)minval < pFilter->filterInfo.upperBndd);
  return FLT_LESS(*(float*)minval, pFilter->filterInfo.upperBndd);
}

bool less_dd(SColumnFilterElem *pFilter, char *minval, char *maxval) {
  return (*(double *)minval < pFilter->filterInfo.upperBndd);
  return *(double *)minval < pFilter->filterInfo.upperBndd;
}

//////////////////////////////////////////////////////////////////
bool large_i8(SColumnFilterElem *pFilter, char *minval, char *maxval) {
bool larger_i8(SColumnFilterElem *pFilter, char *minval, char *maxval) {
  return (*(int8_t *)maxval > pFilter->filterInfo.lowerBndi);
}

bool large_i16(SColumnFilterElem *pFilter, char *minval, char *maxval) {
bool larger_i16(SColumnFilterElem *pFilter, char *minval, char *maxval) {
  return (*(int16_t *)maxval > pFilter->filterInfo.lowerBndi);
}

bool large_i32(SColumnFilterElem *pFilter, char *minval, char *maxval) {
bool larger_i32(SColumnFilterElem *pFilter, char *minval, char *maxval) {
  return (*(int32_t *)maxval > pFilter->filterInfo.lowerBndi);
}

bool large_i64(SColumnFilterElem *pFilter, char *minval, char *maxval) {
bool larger_i64(SColumnFilterElem *pFilter, char *minval, char *maxval) {
  return (*(int64_t *)maxval > pFilter->filterInfo.lowerBndi);
}

bool large_ds(SColumnFilterElem *pFilter, char *minval, char *maxval) {
  return (*(float *)maxval > pFilter->filterInfo.lowerBndd);
bool larger_ds(SColumnFilterElem *pFilter, char *minval, char *maxval) {
  return FLT_GREATER(*(float*)maxval, pFilter->filterInfo.lowerBndd);
}

bool large_dd(SColumnFilterElem *pFilter, char *minval, char *maxval) {
bool larger_dd(SColumnFilterElem *pFilter, char *minval, char *maxval) {
  return (*(double *)maxval > pFilter->filterInfo.lowerBndd);
}
/////////////////////////////////////////////////////////////////////
...
...
@@ -88,10 +94,14 @@ bool lessEqual_i64(SColumnFilterElem *pFilter, char *minval, char *maxval) {
}

bool lessEqual_ds(SColumnFilterElem *pFilter, char *minval, char *maxval) {
  return (*(float *)minval <= pFilter->filterInfo.upperBndd);
  return FLT_LESSEQUAL(*(float*)minval, pFilter->filterInfo.upperBndd);
}

bool lessEqual_dd(SColumnFilterElem *pFilter, char *minval, char *maxval) {
  if ((fabs(*(double*)minval) - pFilter->filterInfo.upperBndd) <= 2 * DBL_EPSILON) {
    return true;
  }

  return (*(double *)minval <= pFilter->filterInfo.upperBndd);
}
...
...
@@ -113,11 +123,15 @@ bool largeEqual_i64(SColumnFilterElem *pFilter, char *minval, char *maxval) {
}

bool largeEqual_ds(SColumnFilterElem *pFilter, char *minval, char *maxval) {
  return (*(float *)maxval >= pFilter->filterInfo.lowerBndd);
  return FLT_GREATEREQUAL(*(float*)maxval, pFilter->filterInfo.lowerBndd);
}

bool largeEqual_dd(SColumnFilterElem *pFilter, char *minval, char *maxval) {
  return (*(double *)maxval >= pFilter->filterInfo.lowerBndd);
  if (fabs(*(double *)maxval - pFilter->filterInfo.lowerBndd) <= 2 * DBL_EPSILON) {
    return true;
  }

  return (*(double *)maxval - pFilter->filterInfo.lowerBndd > (2 * DBL_EPSILON));
}
////////////////////////////////////////////////////////////////////////
...
...
@@ -162,10 +176,12 @@ bool equal_i64(SColumnFilterElem *pFilter, char *minval, char *maxval) {
  }
}

// user specified input filter value and the original saved float value may needs to
// increase the tolerance to obtain the correct result.
bool equal_ds(SColumnFilterElem *pFilter, char *minval, char *maxval) {
  if (*(float *)minval == *(float *)maxval) {
    return (fabs(*(float *)minval - pFilter->filterInfo.lowerBndd) <= FLT_EPSILON);
  } else { /* range filter */
    return FLT_EQUAL(*(float*)minval, pFilter->filterInfo.lowerBndd);
  } else {  // range filter
    assert(*(float *)minval < *(float *)maxval);
    return *(float *)minval <= pFilter->filterInfo.lowerBndd && *(float *)maxval >= pFilter->filterInfo.lowerBndd;
  }
...
...
@@ -173,10 +189,9 @@ bool equal_ds(SColumnFilterElem *pFilter, char *minval, char *maxval) {
bool equal_dd(SColumnFilterElem *pFilter, char *minval, char *maxval) {
  if (*(double *)minval == *(double *)maxval) {
    return (*(double *)minval == pFilter->filterInfo.lowerBndd);
  } else { /* range filter */
    return (fabs(*(double *)minval - pFilter->filterInfo.lowerBndd) <= 2 * DBL_EPSILON);
  } else {  // range filter
    assert(*(double *)minval < *(double *)maxval);
    return *(double *)minval <= pFilter->filterInfo.lowerBndi && *(double *)maxval >= pFilter->filterInfo.lowerBndi;
  }
}
...
...
@@ -255,7 +270,7 @@ bool nequal_i64(SColumnFilterElem *pFilter, char *minval, char *maxval) {
bool nequal_ds(SColumnFilterElem *pFilter, char *minval, char *maxval) {
  if (*(float *)minval == *(float *)maxval) {
    return (*(float *)minval != pFilter->filterInfo.lowerBndd);
    return !FLT_EQUAL(*(float*)minval, pFilter->filterInfo.lowerBndd);
  }

  return true;
...
...
@@ -364,7 +379,8 @@ bool rangeFilter_i64_ei(SColumnFilterElem *pFilter, char *minval, char *maxval)
////////////////////////////////////////////////////////////////////////
bool rangeFilter_ds_ii(SColumnFilterElem *pFilter, char *minval, char *maxval) {
  return (*(float *)minval <= pFilter->filterInfo.upperBndd && *(float *)maxval >= pFilter->filterInfo.lowerBndd);
  return FLT_LESSEQUAL(*(float*)minval, pFilter->filterInfo.upperBndd) &&
         FLT_GREATEREQUAL(*(float*)maxval, pFilter->filterInfo.lowerBndd);
}

bool rangeFilter_ds_ee(SColumnFilterElem *pFilter, char *minval, char *maxval) {
...
...
@@ -376,7 +392,8 @@ bool rangeFilter_ds_ie(SColumnFilterElem *pFilter, char *minval, char *maxval) {
}

bool rangeFilter_ds_ei(SColumnFilterElem *pFilter, char *minval, char *maxval) {
  return (*(float *)minval <= pFilter->filterInfo.upperBndd && *(float *)maxval > pFilter->filterInfo.lowerBndd);
  return FLT_GREATER(*(float*)maxval, pFilter->filterInfo.lowerBndd) &&
         FLT_LESSEQUAL(*(float*)minval, pFilter->filterInfo.upperBndd);
}

//////////////////////////////////////////////////////////////////////////
...
...
@@ -400,7 +417,7 @@ bool rangeFilter_dd_ei(SColumnFilterElem *pFilter, char *minval, char *maxval) {
bool (*filterFunc_i8[])(SColumnFilterElem *pFilter, char *minval, char *maxval) = {
    NULL,
    less_i8,
    large_i8,
    larger_i8,
    equal_i8,
    lessEqual_i8,
    largeEqual_i8,
...
...
@@ -413,7 +430,7 @@ bool (*filterFunc_i8[])(SColumnFilterElem *pFilter, char *minval, char *maxval)
bool (*filterFunc_i16[])(SColumnFilterElem *pFilter, char *minval, char *maxval) = {
    NULL,
    less_i16,
    large_i16,
    larger_i16,
    equal_i16,
    lessEqual_i16,
    largeEqual_i16,
...
...
@@ -426,7 +443,7 @@ bool (*filterFunc_i16[])(SColumnFilterElem *pFilter, char *minval, char *maxval)
bool (*filterFunc_i32[])(SColumnFilterElem *pFilter, char *minval, char *maxval) = {
    NULL,
    less_i32,
    large_i32,
    larger_i32,
    equal_i32,
    lessEqual_i32,
    largeEqual_i32,
...
...
@@ -439,7 +456,7 @@ bool (*filterFunc_i32[])(SColumnFilterElem *pFilter, char *minval, char *maxval)
bool (*filterFunc_i64[])(SColumnFilterElem *pFilter, char *minval, char *maxval) = {
    NULL,
    less_i64,
    large_i64,
    larger_i64,
    equal_i64,
    lessEqual_i64,
    largeEqual_i64,
...
...
@@ -452,7 +469,7 @@ bool (*filterFunc_i64[])(SColumnFilterElem *pFilter, char *minval, char *maxval)
bool (*filterFunc_ds[])(SColumnFilterElem *pFilter, char *minval, char *maxval) = {
    NULL,
    less_ds,
    large_ds,
    larger_ds,
    equal_ds,
    lessEqual_ds,
    largeEqual_ds,
...
...
@@ -465,7 +482,7 @@ bool (*filterFunc_ds[])(SColumnFilterElem *pFilter, char *minval, char *maxval)
bool (*filterFunc_dd[])(SColumnFilterElem *pFilter, char *minval, char *maxval) = {
    NULL,
    less_dd,
    large_dd,
    larger_dd,
    equal_dd,
    lessEqual_dd,
    largeEqual_dd,
...
...
@@ -551,7 +568,7 @@ bool (*rangeFilterFunc_dd[])(SColumnFilterElem *pFilter, char *minval, char *max
__filter_func_t* getRangeFilterFuncArray(int32_t type) {
  switch (type) {
    case TSDB_DATA_TYPE_BOOL: return rangeFilterFunc_i8;
    case TSDB_DATA_TYPE_BOOL:
    case TSDB_DATA_TYPE_TINYINT: return rangeFilterFunc_i8;
    case TSDB_DATA_TYPE_SMALLINT: return rangeFilterFunc_i16;
    case TSDB_DATA_TYPE_INT: return rangeFilterFunc_i32;
...
...
@@ -565,7 +582,7 @@ __filter_func_t* getRangeFilterFuncArray(int32_t type) {
__filter_func_t* getValueFilterFuncArray(int32_t type) {
  switch (type) {
    case TSDB_DATA_TYPE_BOOL: return filterFunc_i8;
    case TSDB_DATA_TYPE_BOOL:
    case TSDB_DATA_TYPE_TINYINT: return filterFunc_i8;
    case TSDB_DATA_TYPE_SMALLINT: return filterFunc_i16;
    case TSDB_DATA_TYPE_INT: return filterFunc_i32;
...
...
src/rpc/src/rpcMain.c
View file @ cf4f0d95
...
...
@@ -631,6 +631,7 @@ static void rpcReleaseConn(SRpcConn *pConn) {
  // if there is an outgoing message, free it
  if (pConn->outType && pConn->pReqMsg) {
    SRpcReqContext *pContext = pConn->pContext;
    if (pContext) {
      if (pContext->pRsp) {
        // for synchronous API, post semaphore to unblock app
        pContext->pRsp->code = TSDB_CODE_RPC_APP_ERROR;
...
...
@@ -640,6 +641,9 @@ static void rpcReleaseConn(SRpcConn *pConn) {
      }

      pContext->pConn = NULL;
      taosRemoveRef(tsRpcRefId, pContext->rid);
    } else {
      assert(0);
    }
  }
}
...
...
@@ -1083,7 +1087,11 @@ static void *rpcProcessMsgFromPeer(SRecvInfo *pRecv) {
      if (code == TSDB_CODE_RPC_INVALID_TIME_STAMP || code == TSDB_CODE_RPC_AUTH_FAILURE) {
        rpcCloseConn(pConn);
      }
      if (pHead->msgType + 1 > 1 && pHead->msgType + 1 < TSDB_MSG_TYPE_MAX) {
        tDebug("%s %p %p, %s is sent with error code:0x%x", pRpc->label, pConn, (void *)pHead->ahandle, taosMsg[pHead->msgType + 1], code);
      } else {
        tError("%s %p %p, %s is sent with error code:0x%x", pRpc->label, pConn, (void *)pHead->ahandle, taosMsg[pHead->msgType], code);
      }
    }
  } else { // msg is passed to app only parsing is ok
    rpcProcessIncomingMsg(pConn, pHead, pContext);
...
...
src/rpc/src/rpcTcp.c
View file @ cf4f0d95
...
...
@@ -242,7 +242,14 @@ static void *taosAcceptTcpConnection(void *arg) {
    taosKeepTcpAlive(connFd);
    struct timeval to={1, 0};
    taosSetSockOpt(connFd, SOL_SOCKET, SO_RCVTIMEO, &to, sizeof(to));
    int32_t ret = taosSetSockOpt(connFd, SOL_SOCKET, SO_RCVTIMEO, &to, sizeof(to));
    if (ret != 0) {
      taosCloseSocket(connFd);
      tError("%s failed to set recv timeout fd(%s)for connection from:%s:%hu", pServerObj->label, strerror(errno),
             taosInetNtoa(caddr.sin_addr), htons(caddr.sin_port));
      continue;
    }

    // pick up the thread to handle this connection
    pThreadObj = pServerObj->pThreadObj[threadId];
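The change above starts checking the return value when putting a receive timeout on an accepted connection, and drops the connection if the option cannot be applied. A standalone sketch of that pattern with plain setsockopt() is given below; the function name and the trimmed error handling are inventions for this example, not the rpc module's code.

/* illustrative sketch only, not the repository's implementation */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <unistd.h>

/* returns 0 on success, -1 if the timeout could not be set (socket is closed) */
static int set_recv_timeout_or_close(int fd) {
  struct timeval to = {1, 0};               /* 1 second, 0 microseconds */
  int ret = setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &to, sizeof(to));
  if (ret != 0) {
    fprintf(stderr, "failed to set recv timeout: %s\n", strerror(errno));
    close(fd);
    return -1;
  }
  return 0;
}

int main(void) {
  int fd = socket(AF_INET, SOCK_STREAM, 0);
  if (fd < 0) return 1;
  int rc = set_recv_timeout_or_close(fd);
  if (rc == 0) close(fd);
  return rc == 0 ? 0 : 1;
}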
...
...
src/rpc/test/rclient.c
View file @ cf4f0d95
...
...
@@ -188,7 +188,8 @@ int main(int argc, char *argv[]) {
  tInfo("it takes %.3f mseconds to send %d requests to server", usedTime, numOfReqs * appThreads);
  tInfo("Performance: %.3f requests per second, msgSize:%d bytes", 1000.0 * numOfReqs * appThreads / usedTime, msgSize);

  getchar();
  int ch = getchar();
  UNUSED(ch);

  taosCloseLog();
...
...
src/sync/inc/syncInt.h
View file @ cf4f0d95
...
...
@@ -62,12 +62,15 @@ typedef struct {
typedef struct {
  SSyncHead syncHead;
  uint16_t  port;
  uint16_t  tranId;
  char      fqdn[TSDB_FQDN_LEN];
  int32_t   sourceId;  // only for arbitrator
} SFirstPkt;

typedef struct {
  int8_t   sync;
  int8_t   reserved;
  uint16_t tranId;
} SFirstPktRsp;

typedef struct {
...
...
@@ -187,6 +190,7 @@ void syncRestartConnection(SSyncPeer *pPeer);
void     syncBroadcastStatus(SSyncNode *pNode);
void     syncAddPeerRef(SSyncPeer *pPeer);
int32_t  syncDecPeerRef(SSyncPeer *pPeer);
uint16_t syncGenTranId();

#ifdef __cplusplus
}
...
...
src/sync/src/syncMain.c
View file @ cf4f0d95
...
...
@@ -396,9 +396,7 @@ void syncConfirmForward(int64_t rid, uint64_t version, int32_t code) {
    pFwdRsp->code = code;

    int32_t msgLen = sizeof(SSyncHead) + sizeof(SFwdRsp);
    int32_t retLen = taosWriteMsg(pPeer->peerFd, msg, msgLen);

    if (retLen == msgLen) {
    if (taosWriteMsg(pPeer->peerFd, msg, msgLen) == msgLen) {
      sTrace("%s, forward-rsp is sent, code:%x hver:%" PRIu64, pPeer->id, code, version);
    } else {
      sDebug("%s, failed to send forward ack, restart", pPeer->id);
...
...
@@ -795,7 +793,7 @@ void syncRestartConnection(SSyncPeer *pPeer) {
static void syncProcessSyncRequest(char *msg, SSyncPeer *pPeer) {
  SSyncNode *pNode = pPeer->pSyncNode;
  sDebug("%s, sync-req is received", pPeer->id);
  sInfo("%s, sync-req is received", pPeer->id);

  if (pPeer->ip == 0) return;
...
...
@@ -873,6 +871,7 @@ static void syncRecoverFromMaster(SSyncPeer *pPeer) {
  firstPkt.syncHead.type = TAOS_SMSG_SYNC_REQ;
  firstPkt.syncHead.vgId = pNode->vgId;
  firstPkt.syncHead.len = sizeof(firstPkt) - sizeof(SSyncHead);
  firstPkt.tranId = syncGenTranId();
  tstrncpy(firstPkt.fqdn, tsNodeFqdn, sizeof(firstPkt.fqdn));
  firstPkt.port = tsSyncPort;
  taosTmrReset(syncNotStarted, tsSyncTimer * 1000, pPeer, tsSyncTmrCtrl, &pPeer->timer);
...
...
@@ -880,8 +879,7 @@ static void syncRecoverFromMaster(SSyncPeer *pPeer) {
  if (taosWriteMsg(pPeer->peerFd, &firstPkt, sizeof(firstPkt)) != sizeof(firstPkt)) {
    sError("%s, failed to send sync-req to peer", pPeer->id);
  } else {
    nodeSStatus = TAOS_SYNC_STATUS_START;
    sInfo("%s, sync-req is sent to peer, set sstatus:%s", pPeer->id, syncStatus[nodeSStatus]);
    sInfo("%s, sync-req is sent to peer, tranId:%u, sstatus:%s", pPeer->id, firstPkt.tranId, syncStatus[nodeSStatus]);
  }
}
...
...
@@ -1018,8 +1016,7 @@ static void syncSendPeersStatusMsgToPeer(SSyncPeer *pPeer, char ack, int8_t type
    pPeersStatus->peersStatus[i].version = pNode->peerInfo[i]->version;
  }

  int32_t retLen = taosWriteMsg(pPeer->peerFd, msg, statusMsgLen);
  if (retLen == statusMsgLen) {
  if (taosWriteMsg(pPeer->peerFd, msg, statusMsgLen) == statusMsgLen) {
    sDebug("%s, status is sent, self:%s:%s:%" PRIu64 ", peer:%s:%s:%" PRIu64 ", ack:%d tranId:%u type:%s pfd:%d",
           pPeer->id, syncRole[nodeRole], syncStatus[nodeSStatus], nodeVersion, syncRole[pPeer->role],
           syncStatus[pPeer->sstatus], pPeer->version, pPeersStatus->ack, pPeersStatus->tranId,
...
...
@@ -1053,10 +1050,11 @@ static void syncSetupPeerConnection(SSyncPeer *pPeer) {
  firstPkt.syncHead.type = TAOS_SMSG_STATUS;
  tstrncpy(firstPkt.fqdn, tsNodeFqdn, sizeof(firstPkt.fqdn));
  firstPkt.port = tsSyncPort;
  firstPkt.tranId = syncGenTranId();
  firstPkt.sourceId = pNode->vgId;  // tell arbitrator its vgId
  if (taosWriteMsg(connFd, &firstPkt, sizeof(firstPkt)) == sizeof(firstPkt)) {
    sDebug("%s, connection to peer server is setup, pfd:%d sfd:%d", pPeer->id, connFd, pPeer->syncFd);
    sDebug("%s, connection to peer server is setup, pfd:%d sfd:%d tranId:%u", pPeer->id, connFd, pPeer->syncFd, firstPkt.tranId);
    pPeer->peerFd = connFd;
    pPeer->role = TAOS_SYNC_ROLE_UNSYNCED;
    pPeer->pConn = taosAllocateTcpConn(tsTcpPool, pPeer, connFd);
...
...
@@ -1093,7 +1091,9 @@ static void syncCreateRestoreDataThread(SSyncPeer *pPeer) {
  pthread_attr_destroy(&thattr);

  if (ret < 0) {
    sError("%s, failed to create sync thread", pPeer->id);
    SSyncNode *pNode = pPeer->pSyncNode;
    nodeSStatus = TAOS_SYNC_STATUS_INIT;
    sError("%s, failed to create sync thread, set sstatus:%s", pPeer->id, syncStatus[nodeSStatus]);
    taosClose(pPeer->syncFd);
    syncDecPeerRef(pPeer);
  } else {
...
...
@@ -1123,6 +1123,8 @@ static void syncProcessIncommingConnection(int32_t connFd, uint32_t sourceIp) {
    return;
  }

  sDebug("vgId:%d, firstPkt is received, tranId:%u", vgId, firstPkt.tranId);

  SSyncNode *pNode = *ppNode;
  pthread_mutex_lock(&pNode->mutex);
...
...
@@ -1141,6 +1143,9 @@ static void syncProcessIncommingConnection(int32_t connFd, uint32_t sourceIp) {
  // first packet tells what kind of link
  if (firstPkt.syncHead.type == TAOS_SMSG_SYNC_DATA) {
    pPeer->syncFd = connFd;
    nodeSStatus = TAOS_SYNC_STATUS_START;
    sInfo("%s, sync-data pkt from master is received, tranId:%u, set sstatus:%s", pPeer->id, firstPkt.tranId,
          syncStatus[nodeSStatus]);
    syncCreateRestoreDataThread(pPeer);
  } else {
    sDebug("%s, TCP connection is up, pfd:%d sfd:%d, old pfd:%d", pPeer->id, connFd, pPeer->syncFd, pPeer->peerFd);
...
...
@@ -1312,7 +1317,7 @@ static int32_t syncForwardToPeerImpl(SSyncNode *pNode, void *data, void *mhandle
  }

  // always update version
  sTrace("vgId:%d, forward to peer, replica:%d role:%s qtype:%s hver:%" PRIu64, pNode->vgId, pNode->replica,
  sTrace("vgId:%d, update version, replica:%d role:%s qtype:%s hver:%" PRIu64, pNode->vgId, pNode->replica,
         syncRole[nodeRole], qtypeStr[qtype], pWalHead->version);
  nodeVersion = pWalHead->version;
...
...
src/sync/src/syncRestore.c
View file @ cf4f0d95
...
...
@@ -36,6 +36,8 @@ static void syncRemoveExtraFile(SSyncPeer *pPeer, int32_t sindex, int32_t eindex
  if (sindex < 0 || eindex < sindex) return;

  sDebug("%s, extra files will be removed between sindex:%d and eindex:%d", pPeer->id, sindex, eindex);

  while (1) {
    name[0] = 0;
    magic = (*pNode->getFileInfo)(pNode->vgId, name, &index, eindex, &size, &fversion);
...
...
@@ -43,7 +45,7 @@ static void syncRemoveExtraFile(SSyncPeer *pPeer, int32_t sindex, int32_t eindex
    snprintf(fname, sizeof(fname), "%s/%s", pNode->path, name);
    (void)remove(fname);
    sDebug("%s, %s is removed", pPeer->id, fname);
    sInfo("%s, %s is removed for its extra", pPeer->id, fname);

    index++;
    if (index > eindex) break;
...
...
@@ -61,11 +63,12 @@ static int32_t syncRestoreFile(SSyncPeer *pPeer, uint64_t *fversion) {
  bool fileChanged = false;

  *fversion = 0;
  sinfo.index = 0;
  sinfo.index = -1;
  while (1) {
    // read file info
    int32_t ret = taosReadMsg(pPeer->syncFd, &(minfo), sizeof(minfo));
    if (ret < 0) {
    minfo.index = -1;
    int32_t ret = taosReadMsg(pPeer->syncFd, &minfo, sizeof(SFileInfo));
    if (ret != sizeof(SFileInfo) || minfo.index == -1) {
      sError("%s, failed to read file info while restore file since %s", pPeer->id, strerror(errno));
      break;
    }
...
...
@@ -75,7 +78,7 @@ static int32_t syncRestoreFile(SSyncPeer *pPeer, uint64_t *fversion) {
      sDebug("%s, no more files to restore", pPeer->id);

      // remove extra files after the current index
      syncRemoveExtraFile(pPeer, sinfo.index + 1, TAOS_SYNC_MAX_INDEX);
      if (sinfo.index != -1) syncRemoveExtraFile(pPeer, sinfo.index + 1, TAOS_SYNC_MAX_INDEX);
      code = 0;
      break;
    }
...
...
@@ -96,7 +99,7 @@ static int32_t syncRestoreFile(SSyncPeer *pPeer, uint64_t *fversion) {
    // send file ack
    ret = taosWriteMsg(pPeer->syncFd, &fileAck, sizeof(fileAck));
    if (ret < 0) {
    if (ret != sizeof(fileAck)) {
      sError("%s, failed to write file:%s ack while restore file since %s", pPeer->id, minfo.name, strerror(errno));
      break;
    }
...
...
@@ -154,7 +157,7 @@ static int32_t syncRestoreWal(SSyncPeer *pPeer) {
  while (1) {
    ret = taosReadMsg(pPeer->syncFd, pHead, sizeof(SWalHead));
    if (ret < 0) {
    if (ret != sizeof(SWalHead)) {
      sError("%s, failed to read walhead while restore wal since %s", pPeer->id, strerror(errno));
      break;
    }
...
...
@@ -166,7 +169,7 @@ static int32_t syncRestoreWal(SSyncPeer *pPeer) {
    }

    // wal sync over
    ret = taosReadMsg(pPeer->syncFd, pHead->cont, pHead->len);
    if (ret < 0) {
    if (ret != pHead->len) {
      sError("%s, failed to read walcont, len:%d while restore wal since %s", pPeer->id, pHead->len, strerror(errno));
      break;
    }
...
...
@@ -286,11 +289,12 @@ static int32_t syncRestoreDataStepByStep(SSyncPeer *pPeer) {
  uint64_t fversion = 0;

  sInfo("%s, start to restore, sstatus:%s", pPeer->id, syncStatus[pPeer->sstatus]);
  SFirstPktRsp firstPktRsp = {.sync = 1};
  if (taosWriteMsg(pPeer->syncFd, &firstPktRsp, sizeof(SFirstPktRsp)) < 0) {
  SFirstPktRsp firstPktRsp = {.sync = 1, .tranId = syncGenTranId()};
  if (taosWriteMsg(pPeer->syncFd, &firstPktRsp, sizeof(SFirstPktRsp)) != sizeof(SFirstPktRsp)) {
    sError("%s, failed to send sync firstPkt rsp since %s", pPeer->id, strerror(errno));
    return -1;
  }
  sDebug("%s, send firstPktRsp to peer, tranId:%u", pPeer->id, firstPktRsp.tranId);

  sInfo("%s, start to restore file, set sstatus:%s", pPeer->id, syncStatus[nodeSStatus]);
  int32_t code = syncRestoreFile(pPeer, &fversion);
...
...
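A pattern that recurs in this commit is replacing the loose "ret < 0" check on taosReadMsg/taosWriteMsg with a strict "ret == sizeof(msg)" check, so that short reads and peer disconnects are treated as failures too. The following is a minimal, self-contained sketch of that idea; readFull() and FileInfoMsg are hypothetical stand-ins, not TDengine's own taosReadMsg or SFileInfo.

/* stand-in helper: read exactly len bytes or report how many arrived */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

typedef struct { int32_t index; char name[64]; } FileInfoMsg;

static int32_t readFull(int fd, void *buf, int32_t len) {
  int32_t got = 0;
  while (got < len) {
    ssize_t n = read(fd, (char *)buf + got, (size_t)(len - got));
    if (n == 0) break;                              /* peer closed: short read */
    if (n < 0) { if (errno == EINTR) continue; return -1; }
    got += (int32_t)n;
  }
  return got;                                       /* may be < len on EOF */
}

static int restoreOneFileInfo(int fd) {
  FileInfoMsg minfo;
  memset(&minfo, 0, sizeof(minfo));
  int32_t ret = readFull(fd, &minfo, (int32_t)sizeof(minfo));
  if (ret != (int32_t)sizeof(minfo)) {              /* a short read is also an error */
    fprintf(stderr, "failed to read file info: %s\n", strerror(errno));
    return -1;
  }
  return 0;
}

int main(void) { return restoreOneFileInfo(STDIN_FILENO) == 0 ? 0 : 1; }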
src/sync/src/syncRetrieve.c  View file @ cf4f0d95
...
@@ -58,7 +58,7 @@ static int32_t syncGetFileVersion(SSyncNode *pNode, SSyncPeer *pPeer) {
   uint64_t fver, wver;
   int32_t code = (*pNode->getVersion)(pNode->vgId, &fver, &wver);
   if (code != 0) {
-    sDebug("%s, vnode is commiting while retrieve, last fver:%" PRIu64, pPeer->id, pPeer->lastFileVer);
+    sDebug("%s, vnode is commiting while get fver for retrieve, last fver:%" PRIu64, pPeer->id, pPeer->lastFileVer);
     return -1;
   }
...
@@ -92,7 +92,10 @@ static int32_t syncRetrieveFile(SSyncPeer *pPeer) {
   int32_t code = -1;
   char    name[TSDB_FILENAME_LEN * 2] = {0};

-  if (syncGetFileVersion(pNode, pPeer) < 0) return -1;
+  if (syncGetFileVersion(pNode, pPeer) < 0) {
+    pPeer->fileChanged = 1;
+    return -1;
+  }

   while (1) {
     // retrieve file info
...
@@ -100,12 +103,11 @@ static int32_t syncRetrieveFile(SSyncPeer *pPeer) {
     fileInfo.size = 0;
     fileInfo.magic = (*pNode->getFileInfo)(pNode->vgId, fileInfo.name, &fileInfo.index, TAOS_SYNC_MAX_INDEX, &fileInfo.size, &fileInfo.fversion);
     // fileInfo.size = htonl(size);
     sDebug("%s, file:%s info is sent, size:%" PRId64, pPeer->id, fileInfo.name, fileInfo.size);

     // send the file info
     int32_t ret = taosWriteMsg(pPeer->syncFd, &(fileInfo), sizeof(fileInfo));
-    if (ret < 0) {
+    if (ret != sizeof(fileInfo)) {
       code = -1;
       sError("%s, failed to write file:%s info while retrieve file since %s", pPeer->id, fileInfo.name, strerror(errno));
       break;
...
@@ -119,8 +121,8 @@ static int32_t syncRetrieveFile(SSyncPeer *pPeer) {
     }

     // wait for the ack from peer
-    ret = taosReadMsg(pPeer->syncFd, &fileAck, sizeof(fileAck));
-    if (ret < 0) {
+    ret = taosReadMsg(pPeer->syncFd, &fileAck, sizeof(SFileAck));
+    if (ret != sizeof(SFileAck)) {
       code = -1;
       sError("%s, failed to read file:%s ack while retrieve file since %s", pPeer->id, fileInfo.name, strerror(errno));
       break;
...
@@ -384,12 +386,15 @@ static int32_t syncRetrieveWal(SSyncPeer *pPeer) {
   }

   if (code == 0) {
-    pPeer->sstatus = TAOS_SYNC_STATUS_CACHE;
-    sInfo("%s, wal retrieve is finished, set sstatus:%s", pPeer->id, syncStatus[pPeer->sstatus]);
     SWalHead walHead;
     memset(&walHead, 0, sizeof(walHead));
-    taosWriteMsg(pPeer->syncFd, &walHead, sizeof(walHead));
+    if (taosWriteMsg(pPeer->syncFd, &walHead, sizeof(walHead)) == sizeof(walHead)) {
+      pPeer->sstatus = TAOS_SYNC_STATUS_CACHE;
+      sInfo("%s, wal retrieve is finished, set sstatus:%s", pPeer->id, syncStatus[pPeer->sstatus]);
+    } else {
+      sError("%s, failed to send last wal record since %s", pPeer->id, strerror(errno));
+      code = -1;
+    }
   } else {
     sError("%s, failed to send wal since %s, code:0x%x", pPeer->id, strerror(errno), code);
   }
...
@@ -404,20 +409,23 @@ static int32_t syncRetrieveFirstPkt(SSyncPeer *pPeer) {
   memset(&firstPkt, 0, sizeof(firstPkt));
   firstPkt.syncHead.type = TAOS_SMSG_SYNC_DATA;
   firstPkt.syncHead.vgId = pNode->vgId;
   firstPkt.tranId = syncGenTranId();
   tstrncpy(firstPkt.fqdn, tsNodeFqdn, sizeof(firstPkt.fqdn));
   firstPkt.port = tsSyncPort;

-  if (taosWriteMsg(pPeer->syncFd, &firstPkt, sizeof(firstPkt)) < 0) {
-    sError("%s, failed to send sync firstPkt since %s", pPeer->id, strerror(errno));
+  if (taosWriteMsg(pPeer->syncFd, &firstPkt, sizeof(firstPkt)) != sizeof(firstPkt)) {
+    sError("%s, failed to send sync firstPkt since %s, tranId:%u", pPeer->id, strerror(errno), firstPkt.tranId);
     return -1;
   }
+  sDebug("%s, send sync-data pkt to peer, tranId:%u", pPeer->id, firstPkt.tranId);

   SFirstPktRsp firstPktRsp;
-  if (taosReadMsg(pPeer->syncFd, &firstPktRsp, sizeof(SFirstPktRsp)) < 0) {
-    sError("%s, failed to read sync firstPkt rsp since %s", pPeer->id, strerror(errno));
+  if (taosReadMsg(pPeer->syncFd, &firstPktRsp, sizeof(SFirstPktRsp)) != sizeof(SFirstPktRsp)) {
+    sError("%s, failed to read sync firstPkt rsp since %s, tranId:%u", pPeer->id, strerror(errno), firstPkt.tranId);
     return -1;
   }
+  sDebug("%s, recv firstPktRsp from peer, tranId:%u", pPeer->id, firstPkt.tranId);

   return 0;
 }
...
src/util/inc/tconfig.h  View file @ cf4f0d95
...
@@ -78,6 +78,7 @@ extern char * tsCfgStatusStr[];
 void taosReadGlobalLogCfg();
 bool taosReadGlobalCfg();
 void taosPrintGlobalCfg();
+void taosDumpGlobalCfg();

 void taosInitConfigOption(SGlobalCfg cfg);
 SGlobalCfg * taosGetConfigOption(const char *option);
...
src/util/src/tconfig.c  View file @ cf4f0d95
...
@@ -397,3 +397,57 @@ void taosPrintGlobalCfg() {
   taosPrintOsInfo();
 }

 static void taosDumpCfg(SGlobalCfg *cfg) {
   int optionLen = (int)strlen(cfg->option);
   int blankLen = TSDB_CFG_PRINT_LEN - optionLen;
   blankLen = blankLen < 0 ? 0 : blankLen;

   char blank[TSDB_CFG_PRINT_LEN];
   memset(blank, ' ', TSDB_CFG_PRINT_LEN);
   blank[blankLen] = 0;

   switch (cfg->valType) {
     case TAOS_CFG_VTYPE_INT16:
       printf(" %s:%s%d%s\n", cfg->option, blank, *((int16_t *)cfg->ptr), tsGlobalUnit[cfg->unitType]);
       break;
     case TAOS_CFG_VTYPE_INT32:
       printf(" %s:%s%d%s\n", cfg->option, blank, *((int32_t *)cfg->ptr), tsGlobalUnit[cfg->unitType]);
       break;
     case TAOS_CFG_VTYPE_FLOAT:
       printf(" %s:%s%f%s\n", cfg->option, blank, *((float *)cfg->ptr), tsGlobalUnit[cfg->unitType]);
       break;
     case TAOS_CFG_VTYPE_STRING:
     case TAOS_CFG_VTYPE_IPSTR:
     case TAOS_CFG_VTYPE_DIRECTORY:
       printf(" %s:%s%s%s\n", cfg->option, blank, (char *)cfg->ptr, tsGlobalUnit[cfg->unitType]);
       break;
     default:
       break;
   }
 }

 void taosDumpGlobalCfg() {
   printf("taos global config:\n");
   printf("==================================\n");
   for (int i = 0; i < tsGlobalConfigNum; ++i) {
     SGlobalCfg *cfg = tsGlobalConfig + i;
     if (tscEmbedded == 0 && !(cfg->cfgType & TSDB_CFG_CTYPE_B_CLIENT)) continue;
     if (cfg->cfgType & TSDB_CFG_CTYPE_B_NOT_PRINT) continue;
     if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_SHOW)) continue;

     taosDumpCfg(cfg);
   }

   printf("\ntaos local config:\n");
   printf("==================================\n");
   for (int i = 0; i < tsGlobalConfigNum; ++i) {
     SGlobalCfg *cfg = tsGlobalConfig + i;
     if (tscEmbedded == 0 && !(cfg->cfgType & TSDB_CFG_CTYPE_B_CLIENT)) continue;
     if (cfg->cfgType & TSDB_CFG_CTYPE_B_NOT_PRINT) continue;
     if (cfg->cfgType & TSDB_CFG_CTYPE_B_SHOW) continue;

     taosDumpCfg(cfg);
   }
 }
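The new dump splits options into a "global" group (flagged for SHOW) and a "local" group (everything else that is printable), padding option names to a fixed width. Below is a self-contained sketch of that walk-and-filter logic; the DemoCfg struct, flag names, and sample options are simplified stand-ins, not the real SGlobalCfg/tsGlobalConfig definitions.

#include <stdio.h>
#include <string.h>

#define CFG_PRINT_LEN 23
#define CTYPE_B_SHOW  0x01

typedef struct { const char *option; int isInt; void *ptr; int cfgType; } DemoCfg;

static void dumpCfg(const DemoCfg *cfg) {
  char blank[CFG_PRINT_LEN + 1];
  int  blankLen = CFG_PRINT_LEN - (int)strlen(cfg->option);
  if (blankLen < 0) blankLen = 0;
  memset(blank, ' ', sizeof(blank));
  blank[blankLen] = 0;                        /* pad option name to a fixed column */
  if (cfg->isInt) printf(" %s:%s%d\n", cfg->option, blank, *(int *)cfg->ptr);
  else            printf(" %s:%s%s\n", cfg->option, blank, (char *)cfg->ptr);
}

int main(void) {
  int  numOfCores = 4;
  char logDir[]   = "/var/log/taos";
  DemoCfg cfgs[] = {
      {"numOfCores", 1, &numOfCores, CTYPE_B_SHOW},
      {"logDir",     0, logDir,      0},           /* not flagged SHOW: skipped */
  };
  printf("taos global config:\n==================================\n");
  for (size_t i = 0; i < sizeof(cfgs) / sizeof(cfgs[0]); ++i) {
    if (!(cfgs[i].cfgType & CTYPE_B_SHOW)) continue;   /* only SHOW entries here */
    dumpCfg(&cfgs[i]);
  }
  return 0;
}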
src/util/src/ttimer.c  View file @ cf4f0d95
...
@@ -225,10 +225,11 @@ static void addToWheel(tmr_obj_t* timer, uint32_t delay) {
 }

 static bool removeFromWheel(tmr_obj_t* timer) {
-  if (timer->wheel >= tListLen(wheels)) {
+  uint8_t wheelIdx = timer->wheel;
+  if (wheelIdx >= tListLen(wheels)) {
     return false;
   }
-  time_wheel_t* wheel = wheels + timer->wheel;
+  time_wheel_t* wheel = wheels + wheelIdx;

   bool removed = false;
   pthread_mutex_lock(&wheel->mutex);
...
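The change above reads the shared wheel field once into a local before the bounds check, so the check and the later array index use the same snapshot even if another thread re-assigns the timer concurrently. A minimal sketch of the pattern, with simplified stand-ins for tmr_obj_t and time_wheel_t:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

#define NUM_WHEELS 3
typedef struct { pthread_mutex_t mutex; } DemoWheel;
typedef struct { volatile uint8_t wheel; } DemoTimer;

static DemoWheel wheels[NUM_WHEELS];

static bool removeFromWheelSafe(DemoTimer *timer) {
  uint8_t wheelIdx = timer->wheel;            /* single read of the shared field */
  if (wheelIdx >= NUM_WHEELS) return false;   /* check ...                        */
  DemoWheel *wheel = wheels + wheelIdx;       /* ... and index with the same value */
  pthread_mutex_lock(&wheel->mutex);
  /* detach the timer from this wheel's list here */
  pthread_mutex_unlock(&wheel->mutex);
  return true;
}

int main(void) {
  for (int i = 0; i < NUM_WHEELS; ++i) pthread_mutex_init(&wheels[i].mutex, NULL);
  DemoTimer t = {.wheel = 1};
  return removeFromWheelSafe(&t) ? 0 : 1;
}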
src/vnode/inc/vnodeInt.h  View file @ cf4f0d95
...
@@ -46,6 +46,7 @@ typedef struct {
   int8_t   isFull;
   int8_t   isCommiting;
   uint64_t version;   // current version
+  uint64_t cversion;  // version while commit start
   uint64_t fversion;  // version on saved data file
   void *   wqueue;    // write queue
   void *   qqueue;    // read query queue
...
src/vnode/src/vnodeMain.c  View file @ cf4f0d95
...
@@ -203,8 +203,8 @@ int32_t vnodeOpen(int32_t vgId) {
   code = vnodeReadVersion(pVnode);
   if (code != TSDB_CODE_SUCCESS) {
-    vError("vgId:%d, failed to read version, generate it from data file", pVnode->vgId);
-    // Allow vnode start even when read version fails, set version as walVersion or zero
+    vError("vgId:%d, failed to read file version, generate it from data file", pVnode->vgId);
+    // Allow vnode start even when read file version fails, set file version as wal version or zero
     // vnodeCleanUp(pVnode);
     // return code;
   }
...
@@ -447,6 +447,7 @@ static int32_t vnodeProcessTsdbStatus(void *arg, int32_t status, int32_t eno) {
   if (status == TSDB_STATUS_COMMIT_START) {
     pVnode->isCommiting = 1;
+    pVnode->cversion = pVnode->version;
     vDebug("vgId:%d, start commit, fver:%" PRIu64 " vver:%" PRIu64, pVnode->vgId, pVnode->fversion, pVnode->version);
     if (!vnodeInInitStatus(pVnode)) {
       return walRenew(pVnode->wal);
...
@@ -457,7 +458,7 @@ static int32_t vnodeProcessTsdbStatus(void *arg, int32_t status, int32_t eno) {
   if (status == TSDB_STATUS_COMMIT_OVER) {
     pVnode->isCommiting = 0;
     pVnode->isFull = 0;
-    pVnode->fversion = pVnode->version;
+    pVnode->fversion = pVnode->cversion;
     vDebug("vgId:%d, commit over, fver:%" PRIu64 " vver:%" PRIu64, pVnode->vgId, pVnode->fversion, pVnode->version);
     if (!vnodeInInitStatus(pVnode)) {
       walRemoveOneOldFile(pVnode->wal);
...
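The new cversion field snapshots the in-memory version when a commit starts; when the commit finishes, fversion is advanced only to that snapshot, so rows written while the commit was still running are not counted as already persisted. A standalone sketch of that bookkeeping (plain struct, no real vnode):

#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t version, cversion, fversion; } DemoVnode;

static void commitStart(DemoVnode *v) { v->cversion = v->version; }  /* snapshot */
static void commitOver(DemoVnode *v)  { v->fversion = v->cversion; } /* not v->version */

int main(void) {
  DemoVnode v = {.version = 100, .cversion = 0, .fversion = 90};
  commitStart(&v);   /* cversion = 100 */
  v.version = 105;   /* writes arrive while the commit runs */
  commitOver(&v);    /* fversion = 100, the newer rows still need the wal */
  printf("fver:%llu vver:%llu\n", (unsigned long long)v.fversion, (unsigned long long)v.version);
  return 0;
}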
src/vnode/src/vnodeRead.c  View file @ cf4f0d95
...
@@ -133,7 +133,7 @@ static int32_t vnodePutItemIntoReadQueue(SVnodeObj *pVnode, void **qhandle, void
   int32_t code = vnodeWriteToRQueue(pVnode, qhandle, 0, TAOS_QTYPE_QUERY, &rpcMsg);
   if (code == TSDB_CODE_SUCCESS) {
-    vDebug("QInfo:%p add to vread queue for exec query", *qhandle);
+    vTrace("QInfo:%p add to vread queue for exec query", *qhandle);
   }

   return code;
...
@@ -164,7 +164,7 @@ static int32_t vnodeDumpQueryResult(SRspRet *pRet, void *pVnode, void **handle,
       }
     } else {
       *freeHandle = true;
-      vDebug("QInfo:%p exec completed, free handle:%d", *handle, *freeHandle);
+      vTrace("QInfo:%p exec completed, free handle:%d", *handle, *freeHandle);
     }
   } else {
     SRetrieveTableRsp *pRsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp));
...
@@ -266,7 +266,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) {
     }

     if (handle != NULL) {
-      vDebug("vgId:%d, QInfo:%p, dnode query msg disposed, create qhandle and returns to app", vgId, *handle);
+      vTrace("vgId:%d, QInfo:%p, dnode query msg disposed, create qhandle and returns to app", vgId, *handle);
       code = vnodePutItemIntoReadQueue(pVnode, handle, pRead->rpcHandle);
       if (code != TSDB_CODE_SUCCESS) {
         pRsp->code = code;
...
@@ -278,10 +278,10 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) {
     assert(pCont != NULL);
     void **qhandle = (void **)pRead->qhandle;

-    vDebug("vgId:%d, QInfo:%p, dnode continues to exec query", pVnode->vgId, *qhandle);
+    vTrace("vgId:%d, QInfo:%p, dnode continues to exec query", pVnode->vgId, *qhandle);

     // In the retrieve blocking model, only 50% CPU will be used in query processing
-    if (tsHalfCoresForQuery) {
+    if (tsRetrieveBlockingModel) {
       qTableQuery(*qhandle);  // do execute query
       qReleaseQInfo(pVnode->qMgmt, (void **)&qhandle, false);
     } else {
...
@@ -294,7 +294,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) {
         pRead->rpcHandle = qGetResultRetrieveMsg(*qhandle);
         assert(pRead->rpcHandle != NULL);

-        vDebug("vgId:%d, QInfo:%p, start to build retrieval rsp after query paused, %p", pVnode->vgId, *qhandle,
+        vTrace("vgId:%d, QInfo:%p, start to build retrieval rsp after query paused, %p", pVnode->vgId, *qhandle,
                pRead->rpcHandle);

         // set the real rsp error code
...
@@ -327,7 +327,7 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SVReadMsg *pRead) {
   pRetrieve->free = htons(pRetrieve->free);
   pRetrieve->qhandle = htobe64(pRetrieve->qhandle);

-  vDebug("vgId:%d, QInfo:%p, retrieve msg is disposed, free:%d, conn:%p", pVnode->vgId, (void *)pRetrieve->qhandle,
+  vTrace("vgId:%d, QInfo:%p, retrieve msg is disposed, free:%d, conn:%p", pVnode->vgId, (void *)pRetrieve->qhandle,
          pRetrieve->free, pRead->rpcHandle);

   memset(pRet, 0, sizeof(SRspRet));
...
@@ -380,7 +380,7 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SVReadMsg *pRead) {
     freeHandle = true;
   } else {  // result is not ready, return immediately
     // Only effects in the non-blocking model
-    if (!tsHalfCoresForQuery) {
+    if (!tsRetrieveBlockingModel) {
       if (!buildRes) {
         assert(pRead->rpcHandle != NULL);
...
@@ -410,6 +410,6 @@ int32_t vnodeNotifyCurrentQhandle(void *handle, void *qhandle, int32_t vgId) {
   pMsg->header.vgId = htonl(vgId);
   pMsg->header.contLen = htonl(sizeof(SRetrieveTableMsg));

-  vDebug("QInfo:%p register qhandle to connect:%p", qhandle, handle);
+  vTrace("QInfo:%p register qhandle to connect:%p", qhandle, handle);

   return rpcReportProgress(handle, (char *)pMsg, sizeof(SRetrieveTableMsg));
 }
src/vnode/src/vnodeWrite.c  View file @ cf4f0d95
...
@@ -243,8 +243,10 @@ int32_t vnodeWriteToWQueue(void *vparam, void *wparam, int32_t qtype, void *rpar
   int32_t queued = atomic_add_fetch_32(&pVnode->queuedWMsg, 1);
   if (queued > MAX_QUEUED_MSG_NUM) {
-    vDebug("vgId:%d, too many msg:%d in vwqueue, flow control", pVnode->vgId, queued);
-    taosMsleep(1);
+    int32_t ms = (queued / MAX_QUEUED_MSG_NUM) * 10 + 3;
+    if (ms > 100) ms = 100;
+    vDebug("vgId:%d, too many msg:%d in vwqueue, flow control %dms", pVnode->vgId, queued, ms);
+    taosMsleep(ms);
   }

   code = vnodePerformFlowCtrl(pWrite);
...
@@ -271,6 +273,8 @@ static void vnodeFlowCtrlMsgToWQueue(void *param, void *tmrId) {
   SVnodeObj * pVnode = pWrite->pVnode;
   int32_t     code = TSDB_CODE_VND_SYNCING;

+  if (pVnode->flowctrlLevel <= 0) code = TSDB_CODE_VND_IS_FLOWCTRL;
+
   pWrite->processedCount++;
   if (pWrite->processedCount > 100) {
     vError("vgId:%d, msg:%p, failed to process since %s, retry:%d", pVnode->vgId, pWrite, tstrerror(code),
...
@@ -290,8 +294,8 @@ static void vnodeFlowCtrlMsgToWQueue(void *param, void *tmrId) {

 static int32_t vnodePerformFlowCtrl(SVWriteMsg *pWrite) {
   SVnodeObj *pVnode = pWrite->pVnode;
-  if (pVnode->flowctrlLevel <= 0) return 0;
   if (pWrite->qtype != TAOS_QTYPE_RPC) return 0;
+  if (pVnode->queuedWMsg < MAX_QUEUED_MSG_NUM && pVnode->flowctrlLevel <= 0) return 0;

   if (tsFlowCtrl == 0) {
     int32_t ms = pow(2, pVnode->flowctrlLevel + 2);
...
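Instead of a fixed 1 ms pause, the write path now sleeps longer the further the queue is past MAX_QUEUED_MSG_NUM, capped at 100 ms. A standalone sketch of just that arithmetic (the constant value below is illustrative, not necessarily TDengine's):

#include <stdint.h>
#include <stdio.h>

#define MAX_QUEUED_MSG_NUM 10000   /* assumed queue threshold for this sketch */

static int32_t flowCtrlDelayMs(int32_t queued) {
  int32_t ms = (queued / MAX_QUEUED_MSG_NUM) * 10 + 3;  /* grows with backlog */
  return ms > 100 ? 100 : ms;                           /* capped at 100 ms   */
}

int main(void) {
  int32_t samples[] = {10001, 30000, 250000};
  for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); ++i)
    printf("queued=%d -> sleep %d ms\n", samples[i], flowCtrlDelayMs(samples[i]));
  /* 10001 -> 13 ms, 30000 -> 33 ms, 250000 -> capped at 100 ms */
  return 0;
}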
src/wal/inc/walInt.h  View file @ cf4f0d95
...
@@ -38,7 +38,7 @@ extern int32_t wDebugFlag;
 #define WAL_SIGNATURE ((uint32_t)(0xFAFBFDFE))
 #define WAL_PATH_LEN  (TSDB_FILENAME_LEN + 12)
 #define WAL_FILE_LEN  (WAL_PATH_LEN + 32)
-#define WAL_FILE_NUM  3
+#define WAL_FILE_NUM  1 // 3

 typedef struct {
   uint64_t version;
...
src/wal/src/walWrite.c  View file @ cf4f0d95
...
@@ -173,7 +173,7 @@ int32_t walRestore(void *handle, void *pVnode, FWalWrite writeFp) {
       continue;
     }

-    wInfo("vgId:%d, file:%s, restore success", pWal->vgId, walName);
+    wInfo("vgId:%d, file:%s, restore success, wver:%" PRIu64, pWal->vgId, walName, pWal->version);

     count++;
   }
...
@@ -267,8 +267,6 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch
     return TAOS_SYSTEM_ERROR(errno);
   }

-  wDebug("vgId:%d, file:%s, start to restore", pWal->vgId, name);
-
   int32_t code = TSDB_CODE_SUCCESS;
   int64_t offset = 0;
   SWalHead *pHead = buffer;
...
tests/Jenkinsfile  View file @ cf4f0d95

// execute this before anything else, including requesting any time on an agent
if (currentBuild.rawBuild.getCauses().toString().contains('BranchIndexingCause')) {
  print "INFO: Build skipped due to trigger being Branch Indexing"
  currentBuild.result = 'ABORTED' // optional, gives a better hint to the user that it's been skipped, rather than the default which shows it's successful
  return
}
properties([pipelineTriggers([githubPush()])])
node {
  git url: 'https://github.com/taosdata/TDengine.git'
}

def pre_test(){
  catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
    sh '''
...
@@ -19,15 +6,14 @@ def pre_test(){
  }
  sh '''
  cd ${WKC}
  rm -rf *
  cd ${WK}
  git reset --hard
- git checkout develop
+ git checkout ${BRANCH}
  git pull
  cd ${WKC}
  rm -rf *
  mv ${WORKSPACE}/* .
  git submodule update
  cd ${WK}
  git reset --hard
  git checkout ${BRANCH}
  git pull
  export TZ=Asia/Harbin
  date
  rm -rf ${WK}/debug
...
@@ -36,13 +22,13 @@ def pre_test(){
  cmake .. > /dev/null
  make > /dev/null
  make install > /dev/null
  cd ${WKC}/tests
  '''
  return 1
}
pipeline {
  agent none
  environment{
    BRANCH = 'develop'
    WK = '/var/lib/jenkins/workspace/TDinternal'
    WKC = '/var/lib/jenkins/workspace/TDinternal/community'
  }
...
@@ -50,13 +36,13 @@ pipeline {
  stages {
    stage('Parallel test stage') {
      parallel {
-       stage('python p1') {
-         agent{label 'p1'}
+       stage('pytest') {
+         agent{label '184'}
          steps {
            pre_test()
            sh '''
            cd ${WKC}/tests
-           ./test-all.sh p1
+           ./test-all.sh pytest
            date'''
          }
        }
...
@@ -64,6 +50,12 @@ pipeline {
        agent{label 'master'}
        steps {
          pre_test()
          catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
            sh '''
            cd ${WKC}/tests/pytest
            python3 concurrent_inquiry.py -c 1
            '''
          }
          sh '''
          cd ${WKC}/tests
          ./test-all.sh b1
...
@@ -72,9 +64,12 @@ pipeline {
        }
      }
      stage('test_crash_gen') {
-       agent{label "b2"}
+       agent{label "185"}
        steps {
          pre_test()
          sh '''
          cd ${WKC}/tests/pytest
          '''
          catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
            sh '''
            cd ${WKC}/tests/pytest
...
@@ -88,7 +83,6 @@ pipeline {
            '''
          }
          sh '''
          date
          cd ${WKC}/tests
          ./test-all.sh b2
          date
...
@@ -97,42 +91,177 @@ pipeline {
        }
      }
      stage('test_valgrind') {
-       agent{label "b3"}
+       agent{label "186"}
        steps {
          pre_test()
          catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
            sh '''
            cd ${WKC}/tests/pytest
            ./valgrind-test.sh 2>&1 > mem-error-out.log
            ./handle_val_log.sh
            '''
          }
          sh '''
          date
          cd ${WKC}/tests
          ./test-all.sh b3
          date'''
        }
      }
-     stage('python p2'){
-       agent{label "p2"}
+     stage('connector'){
+       agent{label "release"}
        steps {
          pre_test()
          sh '''
          cd ${WORKSPACE}
          git checkout develop
          '''
          catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
            sh '''
            date
            cd ${WKC}/tests
            ./test-all.sh p2
            date
            cd ${WORKSPACE}/tests/gotest
            bash batchtest.sh
            '''
          }
          catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
            sh '''
            cd ${WORKSPACE}/tests/examples/python/PYTHONConnectorChecker
            python3 PythonChecker.py
            '''
          }
          catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
            sh '''
            cd ${WORKSPACE}/tests/examples/JDBC/JDBCDemo/
            mvn clean package assembly:single >/dev/null
            java -jar target/jdbcChecker-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
            '''
          }
          catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
            sh '''
            cd ${JENKINS_HOME}/workspace/C#NET/src/CheckC#
            dotnet run
            '''
          }
        }
      }
      stage('arm64_build'){
        agent{label 'arm64'}
        steps {
          sh '''
          cd ${WK}
          git fetch
          git checkout develop
          git pull
          cd ${WKC}
          git fetch
          git checkout develop
          git pull
          git submodule update
          cd ${WKC}/packaging
          ./release.sh -v cluster -c aarch64 -n 2.0.0.0 -m 2.0.0.0
          '''
        }
      }
      stage('arm32_build'){
        agent{label 'arm32'}
        steps {
          catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
            sh '''
            cd ${WK}
            git fetch
            git checkout develop
            git pull
            cd ${WKC}
            git fetch
            git checkout develop
            git pull
            git submodule update
            cd ${WKC}/packaging
            ./release.sh -v cluster -c aarch32 -n 2.0.0.0 -m 2.0.0.0
            '''
          }
        }
      }
    }
  }
  post {
    success {
      emailext (
        subject: "SUCCESSFUL: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
        body: '''<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
  <table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
    <tr>
      <td><br />
        <b><font color="#0B610B"><font size="6">构建信息</font></font></b>
        <hr size="2" width="100%" align="center" /></td>
    </tr>
    <tr>
      <td>
        <ul>
        <div style="font-size:18px">
          <li>构建名称>>分支:${PROJECT_NAME}</li>
          <li>构建结果:<span style="color:green"> Successful </span></li>
          <li>构建编号:${BUILD_NUMBER}</li>
          <li>触发用户:${CAUSE}</li>
          <li>变更概要:${CHANGES}</li>
          <li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
          <li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
          <li>变更集:${JELLY_SCRIPT}</li>
        </div>
        </ul>
      </td>
    </tr>
  </table></font>
</body>
</html>''',
        to: "yqliu@taosdata.com,pxiao@taosdata.com",
        from: "support@taosdata.com"
      )
    }
    failure {
      emailext (
        subject: "FAILED: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
        body: '''<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
  <table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
    <tr>
      <td><br />
        <b><font color="#0B610B"><font size="6">构建信息</font></font></b>
        <hr size="2" width="100%" align="center" /></td>
    </tr>
    <tr>
      <td>
        <ul>
        <div style="font-size:18px">
          <li>构建名称>>分支:${PROJECT_NAME}</li>
          <li>构建结果:<span style="color:green"> Successful </span></li>
          <li>构建编号:${BUILD_NUMBER}</li>
          <li>触发用户:${CAUSE}</li>
          <li>变更概要:${CHANGES}</li>
          <li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
          <li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
          <li>变更集:${JELLY_SCRIPT}</li>
        </div>
        </ul>
      </td>
    </tr>
  </table></font>
</body>
</html>''',
        to: "yqliu@taosdata.com,pxiao@taosdata.com",
        from: "support@taosdata.com"
      )
    }
  }
}
\ No newline at end of file
tests/examples/JDBC/taosdemo/.gitignore  0 → 100644  View file @ cf4f0d95
HELP.md
target/
!.mvn/wrapper/maven-wrapper.jar
!**/src/main/**/target/
!**/src/test/**/target/
### STS ###
.apt_generated
.classpath
.factorypath
.project
.settings
.springBeans
.sts4-cache
### IntelliJ IDEA ###
.idea
*.iws
*.iml
*.ipr
### NetBeans ###
/nbproject/private/
/nbbuild/
/dist/
/nbdist/
/.nb-gradle/
build/
!**/src/main/**/build/
!**/src/test/**/build/
### VS Code ###
.vscode/
tests/examples/JDBC/taosdemo/.mvn/wrapper/MavenWrapperDownloader.java  0 → 100644  View file @ cf4f0d95
/*
* Copyright 2007-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.net.*;
import java.io.*;
import java.nio.channels.*;
import java.util.Properties;

public class MavenWrapperDownloader {

    private static final String WRAPPER_VERSION = "0.5.6";
    /**
     * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided.
     */
    private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/"
            + WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar";

    /**
     * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to
     * use instead of the default one.
     */
    private static final String MAVEN_WRAPPER_PROPERTIES_PATH = ".mvn/wrapper/maven-wrapper.properties";

    /**
     * Path where the maven-wrapper.jar will be saved to.
     */
    private static final String MAVEN_WRAPPER_JAR_PATH = ".mvn/wrapper/maven-wrapper.jar";

    /**
     * Name of the property which should be used to override the default download url for the wrapper.
     */
    private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl";

    public static void main(String args[]) {
        System.out.println("- Downloader started");
        File baseDirectory = new File(args[0]);
        System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath());

        // If the maven-wrapper.properties exists, read it and check if it contains a custom
        // wrapperUrl parameter.
        File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH);
        String url = DEFAULT_DOWNLOAD_URL;
        if (mavenWrapperPropertyFile.exists()) {
            FileInputStream mavenWrapperPropertyFileInputStream = null;
            try {
                mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile);
                Properties mavenWrapperProperties = new Properties();
                mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream);
                url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url);
            } catch (IOException e) {
                System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'");
            } finally {
                try {
                    if (mavenWrapperPropertyFileInputStream != null) {
                        mavenWrapperPropertyFileInputStream.close();
                    }
                } catch (IOException e) {
                    // Ignore ...
                }
            }
        }
        System.out.println("- Downloading from: " + url);

        File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH);
        if (!outputFile.getParentFile().exists()) {
            if (!outputFile.getParentFile().mkdirs()) {
                System.out.println("- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'");
            }
        }
        System.out.println("- Downloading to: " + outputFile.getAbsolutePath());
        try {
            downloadFileFromURL(url, outputFile);
            System.out.println("Done");
            System.exit(0);
        } catch (Throwable e) {
            System.out.println("- Error downloading");
            e.printStackTrace();
            System.exit(1);
        }
    }

    private static void downloadFileFromURL(String urlString, File destination) throws Exception {
        if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) {
            String username = System.getenv("MVNW_USERNAME");
            char[] password = System.getenv("MVNW_PASSWORD").toCharArray();
            Authenticator.setDefault(new Authenticator() {
                @Override
                protected PasswordAuthentication getPasswordAuthentication() {
                    return new PasswordAuthentication(username, password);
                }
            });
        }
        URL website = new URL(urlString);
        ReadableByteChannel rbc;
        rbc = Channels.newChannel(website.openStream());
        FileOutputStream fos = new FileOutputStream(destination);
        fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
        fos.close();
        rbc.close();
    }
}
tests/examples/JDBC/taosdemo/.mvn/wrapper/maven-wrapper.jar  0 → 100644  View file @ cf4f0d95
File added
tests/examples/JDBC/taosdemo/.mvn/wrapper/maven-wrapper.properties  0 → 100644  View file @ cf4f0d95

distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.3/apache-maven-3.6.3-bin.zip
wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar
tests/examples/JDBC/taosdemo/mvnw  0 → 100755  View file @ cf4f0d95
#!/bin/sh
# ----------------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
# Maven Start Up Batch script
#
# Required ENV vars:
# ------------------
# JAVA_HOME - location of a JDK home dir
#
# Optional ENV vars
# -----------------
# M2_HOME - location of maven2's installed home dir
# MAVEN_OPTS - parameters passed to the Java VM when running Maven
# e.g. to debug Maven itself, use
# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
# MAVEN_SKIP_RC - flag to disable loading of mavenrc files
# ----------------------------------------------------------------------------
if [ -z "$MAVEN_SKIP_RC" ] ; then
  if [ -f /etc/mavenrc ] ; then
    . /etc/mavenrc
  fi
  if [ -f "$HOME/.mavenrc" ] ; then
    . "$HOME/.mavenrc"
  fi
fi

# OS specific support.  $var _must_ be set to either true or false.
cygwin=false
darwin=false
mingw=false
case "$(uname)" in
  CYGWIN*) cygwin=true ;;
  MINGW*) mingw=true ;;
  Darwin*) darwin=true
    # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home
    # See https://developer.apple.com/library/mac/qa/qa1170/_index.html
    if [ -z "$JAVA_HOME" ]; then
      if [ -x "/usr/libexec/java_home" ]; then
        export JAVA_HOME="$(/usr/libexec/java_home)"
      else
        export JAVA_HOME="/Library/Java/Home"
      fi
    fi
    ;;
esac

if [ -z "$JAVA_HOME" ] ; then
  if [ -r /etc/gentoo-release ] ; then
    JAVA_HOME=$(java-config --jre-home)
  fi
fi

if [ -z "$M2_HOME" ] ; then
  ## resolve links - $0 may be a link to maven's home
  PRG="$0"

  # need this for relative symlinks
  while [ -h "$PRG" ] ; do
    ls=$(ls -ld "$PRG")
    link=$(expr "$ls" : '.*-> \(.*\)$')
    if expr "$link" : '/.*' > /dev/null; then
      PRG="$link"
    else
      PRG="$(dirname "$PRG")/$link"
    fi
  done

  saveddir=$(pwd)

  M2_HOME=$(dirname "$PRG")/..

  # make it fully qualified
  M2_HOME=$(cd "$M2_HOME" && pwd)

  cd "$saveddir"
  # echo Using m2 at $M2_HOME
fi

# For Cygwin, ensure paths are in UNIX format before anything is touched
if $cygwin ; then
  [ -n "$M2_HOME" ] && M2_HOME=$(cygpath --unix "$M2_HOME")
  [ -n "$JAVA_HOME" ] && JAVA_HOME=$(cygpath --unix "$JAVA_HOME")
  [ -n "$CLASSPATH" ] && CLASSPATH=$(cygpath --path --unix "$CLASSPATH")
fi

# For Mingw, ensure paths are in UNIX format before anything is touched
if $mingw ; then
  [ -n "$M2_HOME" ] && M2_HOME="$( (cd "$M2_HOME"; pwd) )"
  [ -n "$JAVA_HOME" ] && JAVA_HOME="$( (cd "$JAVA_HOME"; pwd) )"
fi

if [ -z "$JAVA_HOME" ]; then
  javaExecutable="$(which javac)"
  if [ -n "$javaExecutable" ] && ! [ "$(expr \"$javaExecutable\" : '\([^ ]*\)')" = "no" ]; then
    # readlink(1) is not available as standard on Solaris 10.
    readLink=$(which readlink)
    if [ ! $(expr "$readLink" : '\([^ ]*\)') = "no" ]; then
      if $darwin ; then
        javaHome="$(dirname \"$javaExecutable\")"
        javaExecutable="$(cd \"$javaHome\" && pwd -P)/javac"
      else
        javaExecutable="$(readlink -f \"$javaExecutable\")"
      fi
      javaHome="$(dirname \"$javaExecutable\")"
      javaHome=$(expr "$javaHome" : '\(.*\)/bin')
      JAVA_HOME="$javaHome"
      export JAVA_HOME
    fi
  fi
fi

if [ -z "$JAVACMD" ] ; then
  if [ -n "$JAVA_HOME" ] ; then
    if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
      # IBM's JDK on AIX uses strange locations for the executables
      JAVACMD="$JAVA_HOME/jre/sh/java"
    else
      JAVACMD="$JAVA_HOME/bin/java"
    fi
  else
    JAVACMD="$(which java)"
  fi
fi

if [ ! -x "$JAVACMD" ] ; then
  echo "Error: JAVA_HOME is not defined correctly." >&2
  echo "  We cannot execute $JAVACMD" >&2
  exit 1
fi

if [ -z "$JAVA_HOME" ] ; then
  echo "Warning: JAVA_HOME environment variable is not set."
fi

CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher

# traverses directory structure from process work directory to filesystem root
# first directory with .mvn subdirectory is considered project base directory
find_maven_basedir() {
  if [ -z "$1" ] ; then
    echo "Path not specified to find_maven_basedir"
    return 1
  fi

  basedir="$1"
  wdir="$1"
  while [ "$wdir" != '/' ] ; do
    if [ -d "$wdir"/.mvn ] ; then
      basedir=$wdir
      break
    fi
    # workaround for JBEAP-8937 (on Solaris 10/Sparc)
    if [ -d "${wdir}" ]; then
      wdir=$(cd "$wdir/.."; pwd)
    fi
    # end of workaround
  done
  echo "${basedir}"
}

# concatenates all lines of a file
concat_lines() {
  if [ -f "$1" ]; then
    echo "$(tr -s '\n' ' ' < "$1")"
  fi
}

BASE_DIR=$(find_maven_basedir "$(pwd)")
if [ -z "$BASE_DIR" ]; then
  exit 1
fi

##########################################################################################
# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
# This allows using the maven wrapper in projects that prohibit checking in binary data.
##########################################################################################
if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then
  if [ "$MVNW_VERBOSE" = true ]; then
    echo "Found .mvn/wrapper/maven-wrapper.jar"
  fi
else
  if [ "$MVNW_VERBOSE" = true ]; then
    echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..."
  fi
  if [ -n "$MVNW_REPOURL" ]; then
    jarUrl="$MVNW_REPOURL/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
  else
    jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
  fi
  while IFS="=" read key value; do
    case "$key" in
      wrapperUrl) jarUrl="$value"; break ;;
    esac
  done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties"
  if [ "$MVNW_VERBOSE" = true ]; then
    echo "Downloading from: $jarUrl"
  fi
  wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar"
  if $cygwin; then
    wrapperJarPath=$(cygpath --path --windows "$wrapperJarPath")
  fi

  if command -v wget > /dev/null; then
    if [ "$MVNW_VERBOSE" = true ]; then
      echo "Found wget ... using wget"
    fi
    if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then
      wget "$jarUrl" -O "$wrapperJarPath"
    else
      wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD "$jarUrl" -O "$wrapperJarPath"
    fi
  elif command -v curl > /dev/null; then
    if [ "$MVNW_VERBOSE" = true ]; then
      echo "Found curl ... using curl"
    fi
    if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then
      curl -o "$wrapperJarPath" "$jarUrl" -f
    else
      curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o "$wrapperJarPath" "$jarUrl" -f
    fi
  else
    if [ "$MVNW_VERBOSE" = true ]; then
      echo "Falling back to using Java to download"
    fi
    javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java"
    # For Cygwin, switch paths to Windows format before running javac
    if $cygwin; then
      javaClass=$(cygpath --path --windows "$javaClass")
    fi
    if [ -e "$javaClass" ]; then
      if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
        if [ "$MVNW_VERBOSE" = true ]; then
          echo " - Compiling MavenWrapperDownloader.java ..."
        fi
        # Compiling the Java class
        ("$JAVA_HOME/bin/javac" "$javaClass")
      fi
      if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
        # Running the downloader
        if [ "$MVNW_VERBOSE" = true ]; then
          echo " - Running MavenWrapperDownloader.java ..."
        fi
        ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR")
      fi
    fi
  fi
fi
##########################################################################################
# End of extension
##########################################################################################

export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"}
if [ "$MVNW_VERBOSE" = true ]; then
  echo $MAVEN_PROJECTBASEDIR
fi
MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS"

# For Cygwin, switch paths to Windows format before running java
if $cygwin; then
  [ -n "$M2_HOME" ] && M2_HOME=$(cygpath --path --windows "$M2_HOME")
  [ -n "$JAVA_HOME" ] && JAVA_HOME=$(cygpath --path --windows "$JAVA_HOME")
  [ -n "$CLASSPATH" ] && CLASSPATH=$(cygpath --path --windows "$CLASSPATH")
  [ -n "$MAVEN_PROJECTBASEDIR" ] && MAVEN_PROJECTBASEDIR=$(cygpath --path --windows "$MAVEN_PROJECTBASEDIR")
fi

# Provide a "standardized" way to retrieve the CLI args that will
# work with both Windows and non-Windows executions.
MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@"
export MAVEN_CMD_LINE_ARGS

WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain

exec "$JAVACMD" \
  $MAVEN_OPTS \
  -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \
  "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \
  ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@"
tests/examples/JDBC/taosdemo/mvnw.cmd  0 → 100644  View file @ cf4f0d95
@REM ----------------------------------------------------------------------------
@REM Licensed to the Apache Software Foundation (ASF) under one
@REM or more contributor license agreements. See the NOTICE file
@REM distributed with this work for additional information
@REM regarding copyright ownership. The ASF licenses this file
@REM to you under the Apache License, Version 2.0 (the
@REM "License"); you may not use this file except in compliance
@REM with the License. You may obtain a copy of the License at
@REM
@REM https://www.apache.org/licenses/LICENSE-2.0
@REM
@REM Unless required by applicable law or agreed to in writing,
@REM software distributed under the License is distributed on an
@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@REM KIND, either express or implied. See the License for the
@REM specific language governing permissions and limitations
@REM under the License.
@REM ----------------------------------------------------------------------------
@REM ----------------------------------------------------------------------------
@REM Maven Start Up Batch script
@REM
@REM Required ENV vars:
@REM JAVA_HOME - location of a JDK home dir
@REM
@REM Optional ENV vars
@REM M2_HOME - location of maven2's installed home dir
@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands
@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending
@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven
@REM e.g. to debug Maven itself, use
@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files
@REM ----------------------------------------------------------------------------
@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on'
@echo off
@REM set title of command window
title %0
@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on'
@if "%MAVEN_BATCH_ECHO%" == "on"  echo %MAVEN_BATCH_ECHO%

@REM set %HOME% to equivalent of $HOME
if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%")

@REM Execute a user defined script before this one
if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre
@REM check for pre script, once with legacy .bat ending and once with .cmd ending
if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat"
if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd"
:skipRcPre

@setlocal

set ERROR_CODE=0

@REM To isolate internal variables from possible post scripts, we use another setlocal
@setlocal

@REM ==== START VALIDATION ====
if not "%JAVA_HOME%" == "" goto OkJHome

echo.
echo Error: JAVA_HOME not found in your environment. >&2
echo Please set the JAVA_HOME variable in your environment to match the >&2
echo location of your Java installation. >&2
echo.
goto error

:OkJHome
if exist "%JAVA_HOME%\bin\java.exe" goto init

echo.
echo Error: JAVA_HOME is set to an invalid directory. >&2
echo JAVA_HOME = "%JAVA_HOME%" >&2
echo Please set the JAVA_HOME variable in your environment to match the >&2
echo location of your Java installation. >&2
echo.
goto error

@REM ==== END VALIDATION ====

:init

@REM Find the project base dir, i.e. the directory that contains the folder ".mvn".
@REM Fallback to current working directory if not found.

set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR%
IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir

set EXEC_DIR=%CD%
set WDIR=%EXEC_DIR%
:findBaseDir
IF EXIST "%WDIR%"\.mvn goto baseDirFound
cd ..
IF "%WDIR%"=="%CD%" goto baseDirNotFound
set WDIR=%CD%
goto findBaseDir

:baseDirFound
set MAVEN_PROJECTBASEDIR=%WDIR%
cd "%EXEC_DIR%"
goto endDetectBaseDir

:baseDirNotFound
set MAVEN_PROJECTBASEDIR=%EXEC_DIR%
cd "%EXEC_DIR%"

:endDetectBaseDir

IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig

@setlocal EnableExtensions EnableDelayedExpansion
for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a
@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS%

:endReadAdditionalConfig

SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe"
set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar"
set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain

set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"

FOR /F "tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO (
    IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B
)

@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
@REM This allows using the maven wrapper in projects that prohibit checking in binary data.
if exist %WRAPPER_JAR% (
    if "%MVNW_VERBOSE%" == "true" (
        echo Found %WRAPPER_JAR%
    )
) else (
    if not "%MVNW_REPOURL%" == "" (
        SET DOWNLOAD_URL="%MVNW_REPOURL%/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
    )
    if "%MVNW_VERBOSE%" == "true" (
        echo Couldn't find %WRAPPER_JAR%, downloading it ...
        echo Downloading from: %DOWNLOAD_URL%
    )

    powershell -Command "&{"^
        "$webclient = new-object System.Net.WebClient;"^
        "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^
        "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^
        "}"^
        "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"^
        "}"
    if "%MVNW_VERBOSE%" == "true" (
        echo Finished downloading %WRAPPER_JAR%
    )
)
@REM End of extension

@REM Provide a "standardized" way to retrieve the CLI args that will
@REM work with both Windows and non-Windows executions.
set MAVEN_CMD_LINE_ARGS=%*

%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %*
if ERRORLEVEL 1 goto error
goto end

:error
set ERROR_CODE=1

:end
@endlocal & set ERROR_CODE=%ERROR_CODE%

if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost
@REM check for post script, once with legacy .bat ending and once with .cmd ending
if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat"
if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd"
:skipRcPost

@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on'
if "%MAVEN_BATCH_PAUSE%" == "on" pause

if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE%

exit /B %ERROR_CODE%
tests/examples/JDBC/taosdemo/pom.xml  0 → 100644  View file @ cf4f0d95

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>2.4.0</version>
        <relativePath/> <!-- lookup parent from repository -->
    </parent>
    <groupId>com.taosdata</groupId>
    <artifactId>taosdemo</artifactId>
    <version>2.0</version>
    <name>taosdemo</name>
    <description>Demo project for TDengine</description>

    <properties>
        <java.version>1.8</java.version>
    </properties>

    <dependencies>
        <!-- taos jdbc -->
        <dependency>
            <groupId>com.taosdata.jdbc</groupId>
            <artifactId>taos-jdbcdriver</artifactId>
            <version>2.0.14</version>
        </dependency>
        <!-- mysql -->
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>5.1.47</version>
        </dependency>
        <!-- mybatis-plus -->
        <dependency>
            <groupId>com.baomidou</groupId>
            <artifactId>mybatis-plus-boot-starter</artifactId>
            <version>3.1.2</version>
        </dependency>
        <!-- log4j -->
        <dependency>
            <groupId>log4j</groupId>
            <artifactId>log4j</artifactId>
            <version>1.2.17</version>
        </dependency>
        <!-- springboot -->
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-jdbc</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-thymeleaf</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        <dependency>
            <groupId>org.mybatis.spring.boot</groupId>
            <artifactId>mybatis-spring-boot-starter</artifactId>
            <version>2.1.4</version>
        </dependency>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.12</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-devtools</artifactId>
            <scope>runtime</scope>
            <optional>true</optional>
        </dependency>
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <optional>true</optional>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
            <scope>test</scope>
        </dependency>
    </dependencies>

    <build>
        <resources>
            <resource>
                <directory>src/main/resources</directory>
                <includes>
                    <include>**/*.properties</include>
                    <include>**/*.xml</include>
                </includes>
                <filtering>true</filtering>
            </resource>
            <resource>
                <directory>src/main/java</directory>
                <includes>
                    <include>**/*.properties</include>
                    <include>**/*.xml</include>
                </includes>
            </resource>
        </resources>
        <plugins>
            <plugin>
                <groupId>org.springframework.boot</groupId>
                <artifactId>spring-boot-maven-plugin</artifactId>
            </plugin>
        </plugins>
    </build>

</project>
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosdemoApplication.java  0 → 100644  View file @ cf4f0d95

package com.taosdata.taosdemo;

import org.mybatis.spring.annotation.MapperScan;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;

@MapperScan(basePackages = {"com.taosdata.taosdemo.mapper"})
@SpringBootApplication
public class TaosdemoApplication {

    public static void main(String[] args) {
        SpringApplication.run(TaosdemoApplication.class, args);
    }

}
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/components/TaosDemoCommandLineRunner.java  0 → 100644  View file @ cf4f0d95

package com.taosdata.taosdemo.components;

import com.taosdata.taosdemo.domain.*;
import com.taosdata.taosdemo.service.DatabaseService;
import com.taosdata.taosdemo.service.SubTableService;
import com.taosdata.taosdemo.service.SuperTableService;
import com.taosdata.taosdemo.service.data.SubTableMetaGenerator;
import com.taosdata.taosdemo.service.data.SubTableValueGenerator;
import com.taosdata.taosdemo.service.data.SuperTableMetaGenerator;
import com.taosdata.taosdemo.utils.JdbcTaosdemoConfig;
import org.apache.log4j.Logger;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.CommandLineRunner;
import org.springframework.stereotype.Component;

import java.util.*;
import java.util.concurrent.TimeUnit;

@Component
public class TaosDemoCommandLineRunner implements CommandLineRunner {

    private static Logger logger = Logger.getLogger(TaosDemoCommandLineRunner.class);
    @Autowired
    private DatabaseService databaseService;
    @Autowired
    private SuperTableService superTableService;
    @Autowired
    private SubTableService subTableService;

    private SuperTableMeta superTableMeta;
    private List<SubTableMeta> subTableMetaList;
    private List<SubTableValue> subTableValueList;
    private List<List<SubTableValue>> dataList;

    @Override
    public void run(String... args) throws Exception {
        // read the configuration parameters
        JdbcTaosdemoConfig config = new JdbcTaosdemoConfig(args);
        boolean isHelp = Arrays.asList(args).contains("--help");
        if (isHelp) {
            JdbcTaosdemoConfig.printHelp();
            System.exit(0);
        }
        // prepare the data
        prepareData(config);
        // create the database
        createDatabaseTask(config);
        // create the tables
        createTableTask(config);
        // insert
        insertTask(config);
        // query: 1. generate query statements, 2. execute the queries
        // drop the tables
        if (config.dropTable) {
            superTableService.drop(config.database, config.superTable);
        }
        System.exit(0);
    }

    private void createDatabaseTask(JdbcTaosdemoConfig config) {
        long start = System.currentTimeMillis();

        Map<String, String> databaseParam = new HashMap<>();
        databaseParam.put("database", config.database);
        databaseParam.put("keep", Integer.toString(config.keep));
        databaseParam.put("days", Integer.toString(config.days));
        databaseParam.put("replica", Integer.toString(config.replica));
        //TODO: other database parameters

        databaseService.dropDatabase(config.database);
        databaseService.createDatabase(databaseParam);
        databaseService.useDatabase(config.database);

        long end = System.currentTimeMillis();
        logger.info(">>> insert time cost : " + (end - start) + " ms.");
    }

    // create the super table, three ways: 1. a given SQL, 2. given numbers of fields and tags, 3. the default
    private void createTableTask(JdbcTaosdemoConfig config) {
        long start = System.currentTimeMillis();
        if (config.doCreateTable) {
            superTableService.create(superTableMeta);
            // create the sub tables in batch
            subTableService.createSubTable(subTableMetaList, config.numOfThreadsForCreate);
        }
        long end = System.currentTimeMillis();
        logger.info(">>> create table time cost : " + (end - start) + " ms.");
    }

    private void insertTask(JdbcTaosdemoConfig config) {
        long start = System.currentTimeMillis();

        int numOfThreadsForInsert = config.numOfThreadsForInsert;
        int sleep = config.sleep;
        if (config.autoCreateTable) {
            // batch insert, creating tables automatically
            dataList.stream().forEach(subTableValues -> {
                subTableService.insertAutoCreateTable(subTableValues, numOfThreadsForInsert);
                sleep(sleep);
            });
        } else {
            dataList.stream().forEach(subTableValues -> {
                subTableService.insert(subTableValues, numOfThreadsForInsert);
                sleep(sleep);
            });
        }
        long end = System.currentTimeMillis();
        logger.info(">>> insert time cost : " + (end - start) + " ms.");
    }

    private void prepareData(JdbcTaosdemoConfig config) {
        long start = System.currentTimeMillis();
        // meta of the super table
        superTableMeta = createSupertable(config);
        // meta of the sub tables
        subTableMetaList = SubTableMetaGenerator.generate(superTableMeta, config.numOfTables, config.tablePrefix);
        // data of the sub tables
        subTableValueList = SubTableValueGenerator.generate(subTableMetaList, config.numOfRowsPerTable, config.startTime, config.timeGap);
        // if out-of-order data is requested, disrupt the data
        if (config.order != 0) {
            SubTableValueGenerator.disrupt(subTableValueList, config.rate, config.range);
        }
        // split the data
        int numOfTables = config.numOfTables;
        int numOfTablesPerSQL = config.numOfTablesPerSQL;
        int numOfRowsPerTable = config.numOfRowsPerTable;
        int numOfValuesPerSQL = config.numOfValuesPerSQL;
        dataList = SubTableValueGenerator.split(subTableValueList, numOfTables, numOfTablesPerSQL, numOfRowsPerTable, numOfValuesPerSQL);
        long end = System.currentTimeMillis();
        logger.info(">>> prepare data time cost : " + (end - start) + " ms.");
    }

    private SuperTableMeta createSupertable(JdbcTaosdemoConfig config) {
        SuperTableMeta tableMeta;
        // create super table
        logger.info(">>> create super table <<<");
        if (config.superTableSQL != null) {
            // use a sql to create super table
            tableMeta = SuperTableMetaGenerator.generate(config.superTableSQL);
        } else if (config.numOfFields == 0) {
            // default sql = "create table test.weather (ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int)";
            SuperTableMeta superTableMeta = new SuperTableMeta();
            superTableMeta.setDatabase(config.database);
            superTableMeta.setName(config.superTable);
            List<FieldMeta> fields = new ArrayList<>();
            fields.add(new FieldMeta("ts", "timestamp"));
            fields.add(new FieldMeta("temperature", "float"));
            fields.add(new FieldMeta("humidity", "int"));
            superTableMeta.setFields(fields);
            List<TagMeta> tags = new ArrayList<>();
            tags.add(new TagMeta("location", "nchar(64)"));
            tags.add(new TagMeta("groupId", "int"));
            superTableMeta.setTags(tags);
            return superTableMeta;
        } else {
            // create super table with specified field size and tag size
            tableMeta = SuperTableMetaGenerator.generate(config.database, config.superTable, config.numOfFields, config.prefixOfFields, config.numOfTags, config.prefixOfTags);
        }
        return tableMeta;
    }

    private static void sleep(int sleep) {
        if (sleep <= 0) return;
        try {
            TimeUnit.MILLISECONDS.sleep(sleep);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }

}
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/DatabaseController.java  0 → 100644  View file @ cf4f0d95

package com.taosdata.taosdemo.controller;

import com.taosdata.taosdemo.service.DatabaseService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;

import java.util.Map;

@RestController
@RequestMapping
public class DatabaseController {

    @Autowired
    private DatabaseService databaseService;

    /**
     * create database
     ***/
    @PostMapping
    public int create(@RequestBody Map<String, String> map) {
        return databaseService.createDatabase(map);
    }

    /**
     * drop database
     **/
    @DeleteMapping("/{dbname}")
    public int delete(@PathVariable("dbname") String dbname) {
        return databaseService.dropDatabase(dbname);
    }

    /**
     * use database
     **/
    @GetMapping("/{dbname}")
    public int use(@PathVariable("dbname") String dbname) {
        return databaseService.useDatabase(dbname);
    }

}
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/InsertController.java  0 → 100644  View file @ cf4f0d95

package com.taosdata.taosdemo.controller;

import org.springframework.web.bind.annotation.RestController;

@RestController
public class InsertController {
    //TODO: multiple threads writing one table, thread = 10, table = 1
    //TODO: one batch writing multiple tables, insert into t1 using weather values() t2 using weather values()
    //TODO: insert frequency
    //TODO: number of records per table
    //TODO: whether to write out of order
    //TODO: ratio and range of the out-of-order data
    //TODO: create tables first, or create tables automatically
    //TODO: one batch writing multiple tables
}
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/SubTableController.java  0 → 100644  View file @ cf4f0d95

package com.taosdata.taosdemo.controller;

import com.taosdata.taosdemo.domain.TableValue;
import com.taosdata.taosdemo.service.SuperTableService;
import com.taosdata.taosdemo.service.TableService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RestController;

@RestController
public class SubTableController {

    @Autowired
    private TableService tableService;
    @Autowired
    private SuperTableService superTableService;

    //TODO: create one sub table from a supertable
    //TODO: create multiple sub tables from a supertable
    //TODO: create sub tables from a supertable with multiple threads
    //TODO: create sub tables from a supertable with multiple threads, specifying the sub table name_prefix, the number of sub tables, and the number of threads

    /**
     * create a table, super table or ordinary table
     **/

    /**
     * create sub tables of a super table
     **/
    @PostMapping("/{database}/{superTable}")
    public int createTable(@PathVariable("database") String database, @PathVariable("superTable") String superTable, @RequestBody TableValue tableMetadta) {
        tableMetadta.setDatabase(database);
        return 0;
    }

}
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/SuperTableController.java  0 → 100644  View file @ cf4f0d95

package com.taosdata.taosdemo.controller;

import com.taosdata.taosdemo.domain.SuperTableMeta;
import com.taosdata.taosdemo.service.SuperTableService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;

public class SuperTableController {
    @Autowired
    private SuperTableService superTableService;

    @PostMapping("/{database}")
    public int createTable(@PathVariable("database") String database, @RequestBody SuperTableMeta tableMetadta) {
        tableMetadta.setDatabase(database);
        return superTableService.create(tableMetadta);
    }

    //TODO: drop a super table
    //TODO: query super tables
    //TODO: aggregate queries over the table
}
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/TableController.java
0 → 100644
package com.taosdata.taosdemo.controller;

public class TableController {
    //TODO: create a normal table, create table(ts timestamp, temperature float)
    //TODO: create a normal table, specifying the number of columns, the first one being the timestamp
    //TODO: create a normal table, specifying the name and type of every column
}
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/FieldMeta.java
0 → 100644
package com.taosdata.taosdemo.domain;

import lombok.Data;

@Data
public class FieldMeta {
    private String name;
    private String type;

    public FieldMeta() {
    }

    public FieldMeta(String name, String type) {
        this.name = name;
        this.type = type;
    }
}
\ No newline at end of file
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/FieldValue.java
0 → 100644
package com.taosdata.taosdemo.domain;

import lombok.Data;

@Data
public class FieldValue<T> {
    private String name;
    private T value;

    public FieldValue() {
    }

    public FieldValue(String name, T value) {
        this.name = name;
        this.value = value;
    }
}
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/RowValue.java
0 → 100644
package com.taosdata.taosdemo.domain;

import lombok.Data;

import java.util.List;

@Data
public class RowValue {
    private List<FieldValue> fields;

    public RowValue(List<FieldValue> fields) {
        this.fields = fields;
    }
}
\ No newline at end of file
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/SubTableMeta.java
0 → 100644
package com.taosdata.taosdemo.domain;

import lombok.Data;

import java.util.List;

@Data
public class SubTableMeta {
    private String database;
    private String supertable;
    private String name;
    private List<TagValue> tags;
    private List<FieldMeta> fields;
}
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/SubTableValue.java
0 → 100644
package com.taosdata.taosdemo.domain;

import lombok.Data;

import java.util.List;

@Data
public class SubTableValue {
    private String database;
    private String supertable;
    private String name;
    private List<TagValue> tags;
    private List<RowValue> values;
}
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/SuperTableMeta.java
0 → 100644
package com.taosdata.taosdemo.domain;

import lombok.Data;

import java.util.List;

@Data
public class SuperTableMeta {
    private String database;
    private String name;
    private List<FieldMeta> fields;
    private List<TagMeta> tags;
}
\ No newline at end of file
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/TableMeta.java
0 → 100644
package com.taosdata.taosdemo.domain;

import lombok.Data;

import java.util.List;

@Data
public class TableMeta {
    private String database;
    private String name;
    private List<FieldMeta> fields;
}
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/TableValue.java
0 → 100644
package com.taosdata.taosdemo.domain;

import lombok.Data;

import java.util.List;

@Data
public class TableValue {
    private String database;
    private String name;
    private List<FieldMeta> columns;
    private List<RowValue> values;
}
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/TagMeta.java
0 → 100644
package com.taosdata.taosdemo.domain;

import lombok.Data;

@Data
public class TagMeta {
    private String name;
    private String type;

    public TagMeta() {
    }

    public TagMeta(String name, String type) {
        this.name = name;
        this.type = type;
    }
}
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/TagValue.java
0 → 100644
package com.taosdata.taosdemo.domain;

import lombok.Data;

@Data
public class TagValue<T> {
    private String name;
    private T value;

    public TagValue() {
    }

    public TagValue(String name, T value) {
        this.name = name;
        this.value = value;
    }
}
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/DatabaseMapper.java
0 → 100644
package com.taosdata.taosdemo.mapper;

import org.apache.ibatis.annotations.Param;
import org.springframework.stereotype.Repository;

import java.util.Map;

@Repository
public interface DatabaseMapper {

    // create database if not exists XXX
    int createDatabase(@Param("database") String dbname);

    // drop database if exists XXX
    int dropDatabase(@Param("database") String dbname);

    // create database if not exists XXX keep XX days XX replica XX
    int createDatabaseWithParameters(Map<String, String> map);

    // use XXX
    int useDatabase(@Param("database") String dbname);

    //TODO: alter database
    //TODO: show database
}
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/DatabaseMapper.xml
0 → 100644
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.taosdata.taosdemo.mapper.DatabaseMapper">

    <!-- create database XXX -->
    <update id="createDatabase" parameterType="java.lang.String">
        create database if not exists ${database}
    </update>

    <update id="dropDatabase" parameterType="java.lang.String">
        DROP database if exists ${database}
    </update>

    <update id="createDatabaseWithParameters" parameterType="map">
        CREATE database if not exists ${database}
        <if test="keep != null">
            KEEP ${keep}
        </if>
        <if test="days != null">
            DAYS ${days}
        </if>
        <if test="replica != null">
            REPLICA ${replica}
        </if>
        <if test="cache != null">
            cache ${cache}
        </if>
        <if test="blocks != null">
            blocks ${blocks}
        </if>
        <if test="minrows != null">
            minrows ${minrows}
        </if>
        <if test="maxrows != null">
            maxrows ${maxrows}
        </if>
    </update>

    <update id="useDatabase" parameterType="java.lang.String">
        use ${database}
    </update>
    <!-- TODO: alter database -->
    <!-- TODO: show database -->
</mapper>
\ No newline at end of file
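Editor's note, not part of the commit: createDatabaseWithParameters above appends one clause per key that is present in the parameter map, so the caller decides which database options appear in the statement. A minimal, hypothetical sketch of the map a caller might pass and the SQL MyBatis would roughly render for it (the class name and values are illustrative only):

import java.util.HashMap;
import java.util.Map;

// Illustrative only: mirrors the <if> blocks in DatabaseMapper.xml above.
public class CreateDatabaseSqlSketch {
    public static void main(String[] args) {
        Map<String, String> map = new HashMap<>();
        map.put("database", "test");   // substituted into ${database}
        map.put("keep", "3650");       // optional: adds "KEEP 3650"
        map.put("days", "30");         // optional: adds "DAYS 30"
        map.put("replica", "1");       // optional: adds "REPLICA 1"

        // Rough equivalent of what the mapper renders for these keys.
        StringBuilder sql = new StringBuilder("CREATE database if not exists " + map.get("database"));
        if (map.containsKey("keep")) sql.append(" KEEP ").append(map.get("keep"));
        if (map.containsKey("days")) sql.append(" DAYS ").append(map.get("days"));
        if (map.containsKey("replica")) sql.append(" REPLICA ").append(map.get("replica"));
        System.out.println(sql); // CREATE database if not exists test KEEP 3650 DAYS 30 REPLICA 1
    }
}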
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SubTableMapper.java
0 → 100644
package com.taosdata.taosdemo.mapper;

import com.taosdata.taosdemo.domain.SubTableMeta;
import com.taosdata.taosdemo.domain.SubTableValue;
import org.apache.ibatis.annotations.Param;
import org.springframework.stereotype.Repository;

import java.util.List;

@Repository
public interface SubTableMapper {

    // create: a sub table
    int createUsingSuperTable(SubTableMeta subTableMeta);

    // insert: multiple values into one sub table
    int insertOneTableMultiValues(SubTableValue subTableValue);

    // insert: multiple values into one sub table, creating the table automatically
    int insertOneTableMultiValuesUsingSuperTable(SubTableValue subTableValue);

    // insert: multiple values into multiple tables
    int insertMultiTableMultiValues(@Param("tables") List<SubTableValue> tables);

    // insert: multiple values into multiple tables, creating the tables automatically
    int insertMultiTableMultiValuesUsingSuperTable(@Param("tables") List<SubTableValue> tables);

    //<!-- TODO: modify a sub table's tag value: alter table ${tablename} set tag tagName=newTagValue-->
}
\ No newline at end of file
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SubTableMapper.xml
0 → 100644
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.taosdata.taosdemo.mapper.SubTableMapper">

    <!-- create a sub table -->
    <update id="createUsingSuperTable">
        CREATE table IF NOT EXISTS ${database}.${name} USING ${supertable} TAGS
        <foreach collection="tags" item="tag" index="index" open="(" close=")" separator=",">
            #{tag.value}
        </foreach>
    </update>

    <!-- insert: multiple rows into one table -->
    <insert id="insertOneTableMultiValues">
        INSERT INTO ${database}.${name}
        VALUES
        <foreach collection="values" item="value">
            <foreach collection="value.fields" item="field" open="(" close=")" separator=",">
                #{field.value}
            </foreach>
        </foreach>
    </insert>

    <!-- insert: multiple rows into one table, creating the table automatically -->
    <insert id="insertOneTableMultiValuesUsingSuperTable">
        INSERT INTO ${database}.${name} USING ${supertable} TAGS
        <foreach collection="tags" item="tag" index="index" open="(" close=")" separator=",">
            #{tag.value}
        </foreach>
        VALUES
        <foreach collection="values" item="value">
            <foreach collection="value.fields" item="field" open="(" close=")" separator=",">
                #{field.value}
            </foreach>
        </foreach>
    </insert>

    <!-- TODO: insert rows into one table, specifying columns -->
    <!-- TODO: insert rows into one table, creating the table automatically and specifying columns -->

    <!-- insert: multiple rows into multiple tables -->
    <insert id="insertMultiTableMultiValues">
        INSERT INTO
        <foreach collection="tables" item="table">
            ${table.database}.${table.name}
            VALUES
            <foreach collection="table.values" item="value">
                <foreach collection="value.fields" item="field" open="(" close=")" separator=",">
                    #{field.value}
                </foreach>
            </foreach>
        </foreach>
    </insert>

    <!-- insert: multiple rows into multiple tables, creating the tables automatically -->
    <insert id="insertMultiTableMultiValuesUsingSuperTable">
        INSERT INTO
        <foreach collection="tables" item="table">
            ${table.database}.${table.name} USING ${table.supertable} TAGS
            <foreach collection="table.tags" item="tag" index="index" open="(" close=")" separator=",">
                #{tag.value}
            </foreach>
            VALUES
            <foreach collection="table.values" item="value">
                <foreach collection="value.fields" item="field" open="(" close=")" separator=",">
                    #{field.value}
                </foreach>
            </foreach>
        </foreach>
    </insert>

    <!-- TODO: insert rows into multiple tables, specifying columns -->
    <!-- TODO: insert rows into multiple tables, creating the tables automatically and specifying columns -->
    <!-- TODO: modify a sub table's tag value: alter table ${tablename} set tag tagName=newTagValue -->
    <!-- TODO: -->
</mapper>
\ No newline at end of file
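Editor's note, not part of the commit: the nested foreach blocks in insertMultiTableMultiValuesUsingSuperTable render one INSERT statement that covers several sub tables, each with its own USING ... TAGS clause and its own VALUES list. A rough, hypothetical Java sketch of that rendering, assuming two tables with two rows each (names and values are illustrative; the real mapper binds field values as prepared parameters):

// Illustrative only: approximates the SQL shape produced by the nested <foreach> above.
public class MultiTableInsertSqlSketch {
    public static void main(String[] args) {
        String database = "test", supertable = "weather";
        StringBuilder sql = new StringBuilder("INSERT INTO");
        for (int t = 1; t <= 2; t++) {                     // <foreach collection="tables">
            sql.append(" ").append(database).append(".t").append(t)
               .append(" USING ").append(supertable).append(" TAGS ('beijing', ").append(t).append(")")
               .append(" VALUES");
            for (int r = 0; r < 2; r++) {                  // <foreach collection="table.values">
                sql.append(" (now + ").append(r).append("s, ").append(20 + r).append(", ").append(50 + r).append(")");
            }
        }
        System.out.println(sql);
    }
}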
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SuperTableMapper.java
0 → 100644
package com.taosdata.taosdemo.mapper;

import com.taosdata.taosdemo.domain.SuperTableMeta;
import org.apache.ibatis.annotations.Param;
import org.springframework.stereotype.Repository;

@Repository
public interface SuperTableMapper {

    // create a super table from a user-defined SQL statement
    int createSuperTableUsingSQL(@Param("createSuperTableSQL") String sql);

    // create a super table: create table if not exists xxx.xxx (f1 type1, f2 type2, ... ) tags( t1 type1, t2 type2 ...)
    int createSuperTable(SuperTableMeta tableMetadata);

    // drop a super table: drop table if exists xxx;
    int dropSuperTable(@Param("database") String database, @Param("name") String name);

    //<!-- TODO: list all super tables: show stables -->
    //<!-- TODO: show the table schema: describe stable -->
    //<!-- TODO: add a column: alter table ${tablename} add column fieldName dataType -->
    //<!-- TODO: drop a column: alter table ${tablename} drop column fieldName -->
    //<!-- TODO: add a tag: alter table ${tablename} add tag new_tagName tag_type -->
    //<!-- TODO: drop a tag: alter table ${tablename} drop tag_name -->
    //<!-- TODO: rename a tag: alter table ${tablename} change tag old_tagName new_tagName -->
}
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SuperTableMapper.xml
0 → 100644
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.taosdata.taosdemo.mapper.SuperTableMapper">

    <update id="createSuperTableUsingSQL">
        ${createSuperTableSQL}
    </update>

    <!-- create a super table -->
    <update id="createSuperTable">
        create table if not exists ${database}.${name}
        <foreach collection="fields" item="field" index="index" open="(" close=")" separator=",">
            ${field.name} ${field.type}
        </foreach>
        tags
        <foreach collection="tags" item="tag" index="index" open="(" close=")" separator=",">
            ${tag.name} ${tag.type}
        </foreach>
    </update>

    <!-- drop super table -->
    <delete id="dropSuperTable">
        drop table if exists ${database}.${name}
    </delete>

    <!-- TODO: list all super tables: show stables -->
    <!-- TODO: show the table schema: describe stable -->
    <!-- TODO: add a column: alter table ${tablename} add column fieldName dataType -->
    <!-- TODO: drop a column: alter table ${tablename} drop column fieldName -->
    <!-- TODO: add a tag: alter table ${tablename} add tag new_tagName tag_type -->
    <!-- TODO: drop a tag: alter table ${tablename} drop tag_name -->
    <!-- TODO: rename a tag: alter table ${tablename} change tag old_tagName new_tagName -->
</mapper>
\ No newline at end of file
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/TableMapper.java
0 → 100644
package com.taosdata.taosdemo.mapper;

import com.taosdata.taosdemo.domain.TableMeta;
import com.taosdata.taosdemo.domain.TableValue;
import org.apache.ibatis.annotations.Param;
import org.springframework.stereotype.Repository;

import java.util.List;

@Repository
public interface TableMapper {

    // create: a normal table
    int create(TableMeta tableMeta);

    // insert: multiple values into one table
    int insertOneTableMultiValues(TableValue values);

    // insert: multiple values into one table, specifying the columns
    int insertOneTableMultiValuesWithColumns(TableValue values);

    // insert: multiple values into multiple tables
    int insertMultiTableMultiValues(@Param("tables") List<TableValue> tables);

    // insert: multiple values into multiple tables, specifying the columns
    int insertMultiTableMultiValuesWithColumns(@Param("tables") List<TableValue> tables);
}
\ No newline at end of file
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/TableMapper.xml
0 → 100644
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.taosdata.taosdemo.mapper.TableMapper">

    <!-- create a normal table -->
    <update id="create" parameterType="com.taosdata.taosdemo.domain.TableMeta">
        create table if not exists ${database}.${name}
        <foreach collection="fields" item="field" index="index" open="(" close=")" separator=",">
            ${field.name} ${field.type}
        </foreach>
    </update>

    <!-- insert multiple rows into one normal table -->
    <insert id="insertOneTableMultiValues" parameterType="com.taosdata.taosdemo.domain.TableValue">
        insert into ${database}.${name} values
        <foreach collection="values" item="value">
            <foreach collection="value.fields" item="field" open="(" close=")" separator=",">
                ${field.value}
            </foreach>
        </foreach>
    </insert>

    <!-- insert rows into specified columns of one table: insert into XXX.xx (f1,f2,f3...) values(v1,v2,v3...) -->
    <insert id="insertOneTableMultiValuesWithColumns" parameterType="com.taosdata.taosdemo.domain.TableValue">
        insert into ${database}.${name}
        <foreach collection="columns" item="column" open="(" close=")" separator=",">
            ${column.name}
        </foreach>
        values
        <foreach collection="values" item="value">
            <foreach collection="value.fields" item="field" open="(" close=")" separator=",">
                ${field.value}
            </foreach>
        </foreach>
    </insert>

    <!-- insert multiple rows into multiple tables -->
    <insert id="insertMultiTableMultiValues">
        insert into
        <foreach collection="tables" item="table">
            ${table.database}.${table.name} values
            <foreach collection="table.values" item="value">
                <foreach collection="value.fields" item="field" open="(" close=")" separator=",">
                    ${field.value}
                </foreach>
            </foreach>
        </foreach>
    </insert>

    <!-- insert multiple rows into specified columns of multiple tables -->
    <insert id="insertMultiTableMultiValuesWithColumns">
        insert into
        <foreach collection="tables" item="table">
            ${table.database}.${table.name}
            <foreach collection="table.columns" item="column" open="(" close=")" separator=",">
                ${column.name}
            </foreach>
            values
            <foreach collection="table.values" item="value">
                <foreach collection="value.fields" item="field" open="(" close=")" separator=",">
                    ${field.value}
                </foreach>
            </foreach>
        </foreach>
    </insert>
</mapper>
\ No newline at end of file
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/AbstractService.java
0 → 100644
package com.taosdata.taosdemo.service;

import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

public class AbstractService {

    protected int getAffectRows(List<Future<Integer>> futureList) {
        int count = 0;
        for (Future<Integer> future : futureList) {
            try {
                count += future.get();
            } catch (InterruptedException e) {
                e.printStackTrace();
            } catch (ExecutionException e) {
                e.printStackTrace();
            }
        }
        return count;
    }

    protected int getAffectRows(Future<Integer> future) {
        int count = 0;
        try {
            count += future.get();
        } catch (InterruptedException e) {
            e.printStackTrace();
        } catch (ExecutionException e) {
            e.printStackTrace();
        }
        return count;
    }
}
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/DatabaseService.java
0 → 100644
package com.taosdata.taosdemo.service;

import com.taosdata.taosdemo.mapper.DatabaseMapper;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import java.util.Map;

@Service
public class DatabaseService {

    @Autowired
    private DatabaseMapper databaseMapper;

    // create a database with the given name
    public int createDatabase(String database) {
        return databaseMapper.createDatabase(database);
    }

    // create a database with parameters such as keep, days, replica, etc.
    public int createDatabase(Map<String, String> map) {
        if (map.isEmpty())
            return 0;
        if (map.containsKey("database") && map.size() == 1)
            return databaseMapper.createDatabase(map.get("database"));
        return databaseMapper.createDatabaseWithParameters(map);
    }

    // drop database
    public int dropDatabase(String dbname) {
        return databaseMapper.dropDatabase(dbname);
    }

    // use database
    public int useDatabase(String dbname) {
        return databaseMapper.useDatabase(dbname);
    }
}
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SubTableService.java
0 → 100644
package com.taosdata.taosdemo.service;

import com.taosdata.taosdemo.domain.SubTableMeta;
import com.taosdata.taosdemo.domain.SubTableValue;
import com.taosdata.taosdemo.mapper.SubTableMapper;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

@Service
public class SubTableService extends AbstractService {

    @Autowired
    private SubTableMapper mapper;

    /**
     * 1. choose a database and find all super tables in it
     * 2. choose a super table to obtain its schema, including fields and tags
     * 3. specify the prefix and the number of sub tables
     * 4. specify the number of threads used to create the sub tables
     */
    //TODO: specify database, supertable, sub table prefix, number of sub tables and number of threads

    // create sub tables with the specified number of threads
    public int createSubTable(List<SubTableMeta> subTables, int threadSize) {
        ExecutorService executor = Executors.newFixedThreadPool(threadSize);
        List<Future<Integer>> futureList = new ArrayList<>();
        for (SubTableMeta subTableMeta : subTables) {
            Future<Integer> future = executor.submit(() -> createSubTable(subTableMeta));
            futureList.add(future);
        }
        executor.shutdown();
        return getAffectRows(futureList);
    }

    // create one sub table, specifying database, supertable, tablename and tag values
    public int createSubTable(SubTableMeta subTableMeta) {
        return mapper.createUsingSuperTable(subTableMeta);
    }

    // create multiple sub tables with a single thread; each sub table can specify its own database, supertable, tablename and tag values
    public int createSubTable(List<SubTableMeta> subTables) {
        return createSubTable(subTables, 1);
    }

    /*************************************************************************************************************************/
    // insert: multiple threads, multiple tables
    public int insert(List<SubTableValue> subTableValues, int threadSize) {
        ExecutorService executor = Executors.newFixedThreadPool(threadSize);
        Future<Integer> future = executor.submit(() -> insert(subTableValues));
        executor.shutdown();
        return getAffectRows(future);
    }

    // insert: multiple threads, multiple tables, creating the tables automatically
    public int insertAutoCreateTable(List<SubTableValue> subTableValues, int threadSize) {
        ExecutorService executor = Executors.newFixedThreadPool(threadSize);
        Future<Integer> future = executor.submit(() -> insertAutoCreateTable(subTableValues));
        executor.shutdown();
        return getAffectRows(future);
    }

    // insert: one table, insert into xxx values(),()...
    public int insert(SubTableValue subTableValue) {
        return mapper.insertOneTableMultiValues(subTableValue);
    }

    // insert: multiple tables, insert into xxx values(),()... xxx values(),()...
    public int insert(List<SubTableValue> subTableValues) {
        return mapper.insertMultiTableMultiValuesUsingSuperTable(subTableValues);
    }

    // insert: one table, creating it automatically, insert into xxx using xxx tags(...) values(),()...
    public int insertAutoCreateTable(SubTableValue subTableValue) {
        return mapper.insertOneTableMultiValuesUsingSuperTable(subTableValue);
    }

    // insert: multiple tables, creating them automatically, insert into xxx using XXX tags(...) values(),()... xxx using XXX tags(...) values(),()...
    public int insertAutoCreateTable(List<SubTableValue> subTableValues) {
        return mapper.insertMultiTableMultiValuesUsingSuperTable(subTableValues);
    }

//        ExecutorService executors = Executors.newFixedThreadPool(threadSize);
//        int count = 0;
//
//        //
//        List<SubTableValue> subTableValues = new ArrayList<>();
//        for (int tableIndex = 1; tableIndex <= numOfTablesPerSQL; tableIndex++) {
//            // each table
//            SubTableValue subTableValue = new SubTableValue();
//            subTableValue.setDatabase();
//            subTableValue.setName();
//            subTableValue.setSupertable();
//
//            List<RowValue> values = new ArrayList<>();
//            for (int valueCnt = 0; valueCnt < numOfValuesPerSQL; valueCnt++) {
//                List<FieldValue> fields = new ArrayList<>();
//                for (int fieldInd = 0; fieldInd <; fieldInd++) {
//                    FieldValue<Object> field = new FieldValue<>("", "");
//                    fields.add(field);
//                }
//                RowValue row = new RowValue();
//                row.setFields(fields);
//                values.add(row);
//            }
//            subTableValue.setValues(values);
//            subTableValues.add(subTableValue);
//        }
}
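Editor's note, not part of the commit: the four steps in the Javadoc above are carried out by combining this service with the generators added later in this commit. A minimal, hypothetical driver showing how those pieces fit together, assuming the two services are wired by Spring and the database test already exists (class name, table counts and SQL are illustrative only):

import com.taosdata.taosdemo.domain.SubTableMeta;
import com.taosdata.taosdemo.domain.SubTableValue;
import com.taosdata.taosdemo.domain.SuperTableMeta;
import com.taosdata.taosdemo.service.SubTableService;
import com.taosdata.taosdemo.service.SuperTableService;
import com.taosdata.taosdemo.service.data.SubTableMetaGenerator;
import com.taosdata.taosdemo.service.data.SubTableValueGenerator;
import com.taosdata.taosdemo.service.data.SuperTableMetaGenerator;

import java.util.List;

// Hypothetical sketch: follows the four steps described in the Javadoc above.
public class SubTableWorkflowSketch {

    // superTableService and subTableService are assumed to be injected from the Spring context.
    public void run(SuperTableService superTableService, SubTableService subTableService) {
        // 1-2. describe the super table (schema: fields and tags) and create it
        SuperTableMeta stable = SuperTableMetaGenerator.generate(
                "create table test.weather(ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int)");
        superTableService.create(stable);
        // 3. build metadata for 100 sub tables named t1..t100 with random tag values
        List<SubTableMeta> subTables = SubTableMetaGenerator.generate(stable, 100, "t");
        // 4. create them with 10 threads, then insert 1000 rows per table, 1000 ms apart
        subTableService.createSubTable(subTables, 10);
        List<SubTableValue> values = SubTableValueGenerator.generate(subTables, 1000, 0, 1000);
        subTableService.insertAutoCreateTable(values, 10);
    }
}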
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SuperTableService.java
0 → 100644
package com.taosdata.taosdemo.service;

import com.taosdata.taosdemo.domain.SuperTableMeta;
import com.taosdata.taosdemo.mapper.SuperTableMapper;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

@Service
public class SuperTableService {

    @Autowired
    private SuperTableMapper superTableMapper;

    // create a super table, specifying the name and type of every field and every tag
    public int create(SuperTableMeta superTableMeta) {
        return superTableMapper.createSuperTable(superTableMeta);
    }

    public void drop(String database, String name) {
        superTableMapper.dropSuperTable(database, name);
    }
}
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/TableService.java
0 → 100644
package com.taosdata.taosdemo.service;

import com.taosdata.taosdemo.domain.TableMeta;
import com.taosdata.taosdemo.mapper.TableMapper;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

@Service
public class TableService extends AbstractService {

    @Autowired
    private TableMapper tableMapper;

    // create one table
    public int create(TableMeta tableMeta) {
        return tableMapper.create(tableMeta);
    }

    // create multiple tables
    public int create(List<TableMeta> tables) {
        return create(tables, 1);
    }

    // create multiple tables with multiple threads
    public int create(List<TableMeta> tables, int threadSize) {
        ExecutorService executors = Executors.newFixedThreadPool(threadSize);
        List<Future<Integer>> futures = new ArrayList<>();
        for (TableMeta table : tables) {
            Future<Integer> future = executors.submit(() -> create(table));
            futures.add(future);
        }
        return getAffectRows(futures);
    }
}
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/FieldValueGenerator.java
0 → 100644
package com.taosdata.taosdemo.service.data;

import com.taosdata.taosdemo.domain.FieldMeta;
import com.taosdata.taosdemo.domain.FieldValue;
import com.taosdata.taosdemo.domain.RowValue;
import com.taosdata.taosdemo.utils.DataGenerator;

import java.util.*;

public class FieldValueGenerator {

    public static Random random = new Random(System.currentTimeMillis());

    // generate a time series from start to end with in-order timestamps (no out-of-order rows); field values are generated randomly
    public static List<RowValue> generate(long start, long end, long timeGap, List<FieldMeta> fieldMetaList) {
        List<RowValue> values = new ArrayList<>();

        for (long ts = start; ts < end; ts += timeGap) {
            List<FieldValue> fieldValues = new ArrayList<>();
            // timestamp
            fieldValues.add(new FieldValue(fieldMetaList.get(0).getName(), ts));
            // other values
            for (int fieldInd = 1; fieldInd < fieldMetaList.size(); fieldInd++) {
                FieldMeta fieldMeta = fieldMetaList.get(fieldInd);
                fieldValues.add(new FieldValue(fieldMeta.getName(), DataGenerator.randomValue(fieldMeta.getType())));
            }
            values.add(new RowValue(fieldValues));
        }
        return values;
    }

    // turn an in-order time series into one containing out-of-order rows; rate is the percentage of out-of-order rows, range is how far a timestamp may jump backwards; field values stay unchanged
    public static List<RowValue> disrupt(List<RowValue> values, int rate, long range) {
        long timeGap = (long) (values.get(1).getFields().get(0).getValue()) - (long) (values.get(0).getFields().get(0).getValue());
        int bugSize = values.size() * rate / 100;
        Set<Integer> bugIndSet = new HashSet<>();
        while (bugIndSet.size() < bugSize) {
            bugIndSet.add(random.nextInt(values.size()));
        }
        for (Integer bugInd : bugIndSet) {
            Long timestamp = (Long) values.get(bugInd).getFields().get(0).getValue();
            Long newTimestamp = timestamp - timeGap - random.nextInt((int) range);
            values.get(bugInd).getFields().get(0).setValue(newTimestamp);
        }
        return values;
    }
}
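Editor's note, not part of the commit: generate() produces strictly increasing timestamps at timeGap intervals, and disrupt() then rewrites rate percent of them so they jump backwards by at least one timeGap and at most timeGap plus range milliseconds, which is how the out-of-order insert mode obtains its data. A minimal, hypothetical usage sketch (class name and values are illustrative only):

import com.taosdata.taosdemo.domain.FieldMeta;
import com.taosdata.taosdemo.domain.RowValue;
import com.taosdata.taosdemo.service.data.FieldValueGenerator;

import java.util.Arrays;
import java.util.List;

// Hypothetical example exercising generate() and disrupt().
public class FieldValueGeneratorSketch {
    public static void main(String[] args) {
        List<FieldMeta> schema = Arrays.asList(
                new FieldMeta("ts", "timestamp"),
                new FieldMeta("temperature", "float"));
        long start = System.currentTimeMillis() - 10_000;
        // 10 in-order rows, 1000 ms apart
        List<RowValue> rows = FieldValueGenerator.generate(start, start + 10_000, 1000, schema);
        // make 20% of the rows out of order, jumping back at most 500 ms beyond one timeGap
        FieldValueGenerator.disrupt(rows, 20, 500);
        rows.forEach(r -> System.out.println(r.getFields().get(0).getValue()));
    }
}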
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SubTableMetaGenerator.java
0 → 100644
package com.taosdata.taosdemo.service.data;

import com.taosdata.taosdemo.domain.SubTableMeta;
import com.taosdata.taosdemo.domain.SuperTableMeta;
import com.taosdata.taosdemo.domain.TagValue;

import java.util.ArrayList;
import java.util.List;

public class SubTableMetaGenerator {

    // create tableSize sub tables, using tablePrefix as the prefix of the sub table names and superTableMeta as the metadata
    // create table xxx using XXX tags(XXX)
    public static List<SubTableMeta> generate(SuperTableMeta superTableMeta, int tableSize, String tablePrefix) {
        List<SubTableMeta> subTableMetaList = new ArrayList<>();
        for (int i = 1; i <= tableSize; i++) {
            SubTableMeta subTableMeta = new SubTableMeta();
            // create table xxx.xxx using xxx tags(...)
            subTableMeta.setDatabase(superTableMeta.getDatabase());
            subTableMeta.setName(tablePrefix + i);
            subTableMeta.setSupertable(superTableMeta.getName());
            subTableMeta.setFields(superTableMeta.getFields());
            List<TagValue> tagValues = TagValueGenerator.generate(superTableMeta.getTags());
            subTableMeta.setTags(tagValues);
            subTableMetaList.add(subTableMeta);
        }
        return subTableMetaList;
    }
}
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SubTableValueGenerator.java
0 → 100644
package com.taosdata.taosdemo.service.data;

import com.taosdata.taosdemo.domain.RowValue;
import com.taosdata.taosdemo.domain.SubTableMeta;
import com.taosdata.taosdemo.domain.SubTableValue;
import com.taosdata.taosdemo.utils.TimeStampUtil;
import org.springframework.beans.BeanUtils;

import java.util.ArrayList;
import java.util.List;

public class SubTableValueGenerator {

    public static List<SubTableValue> generate(List<SubTableMeta> subTableMetaList, int numOfRowsPerTable, long start, long timeGap) {
        List<SubTableValue> subTableValueList = new ArrayList<>();

        subTableMetaList.stream().forEach((subTableMeta) -> {
            // insert into xxx.xxx using xxxx tags(...) values(),()...
            SubTableValue subTableValue = new SubTableValue();
            subTableValue.setDatabase(subTableMeta.getDatabase());
            subTableValue.setName(subTableMeta.getName());
            subTableValue.setSupertable(subTableMeta.getSupertable());
            subTableValue.setTags(subTableMeta.getTags());
            TimeStampUtil.TimeTuple tuple = TimeStampUtil.range(start, timeGap, numOfRowsPerTable);
            List<RowValue> values = FieldValueGenerator.generate(tuple.start, tuple.end, tuple.timeGap, subTableMeta.getFields());
            subTableValue.setValues(values);

            subTableValueList.add(subTableValue);
        });
        return subTableValueList;
    }

    public static void disrupt(List<SubTableValue> subTableValueList, int rate, long range) {
        subTableValueList.stream().forEach((tableValue) -> {
            List<RowValue> values = tableValue.getValues();
            FieldValueGenerator.disrupt(values, rate, range);
        });
    }

    public static List<List<SubTableValue>> split(List<SubTableValue> subTableValueList, int numOfTables, int numOfTablesPerSQL, int numOfRowsPerTable, int numOfValuesPerSQL) {
        List<List<SubTableValue>> dataList = new ArrayList<>();
        if (numOfRowsPerTable < numOfValuesPerSQL)
            numOfValuesPerSQL = numOfRowsPerTable;
        if (numOfTables < numOfTablesPerSQL)
            numOfTablesPerSQL = numOfTables;
        // table
        for (int tableCnt = 0; tableCnt < numOfTables; ) {
            int tableSize = numOfTablesPerSQL;
            if (tableCnt + tableSize > numOfTables) {
                tableSize = numOfTables - tableCnt;
            }
            // row
            for (int rowCnt = 0; rowCnt < numOfRowsPerTable; ) {
                int rowSize = numOfValuesPerSQL;
                if (rowCnt + rowSize > numOfRowsPerTable) {
                    rowSize = numOfRowsPerTable - rowCnt;
                }
                // System.out.println("rowCnt: " + rowCnt + ", rowSize: " + rowSize + ", tableCnt: " + tableCnt + ", tableSize: " + tableSize);
                // split
                List<SubTableValue> blocks = subTableValueList.subList(tableCnt, tableCnt + tableSize);
                List<SubTableValue> newBlocks = new ArrayList<>();
                for (int i = 0; i < blocks.size(); i++) {
                    SubTableValue subTableValue = blocks.get(i);
                    SubTableValue newSubTableValue = new SubTableValue();
                    BeanUtils.copyProperties(subTableValue, newSubTableValue);
                    List<RowValue> values = subTableValue.getValues().subList(rowCnt, rowCnt + rowSize);
                    newSubTableValue.setValues(values);
                    newBlocks.add(newSubTableValue);
                }
                dataList.add(newBlocks);

                rowCnt += rowSize;
            }
            tableCnt += tableSize;
        }
        return dataList;
    }

    public static void main(String[] args) {
        split(null, 99, 10, 99, 10);
    }
}
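Editor's note, not part of the commit: split() slices the full data set into batches, where each inner list becomes one INSERT statement covering at most numOfTablesPerSQL tables and at most numOfValuesPerSQL rows per table. With the numbers used in the main() above (99 tables, 10 tables per SQL, 99 rows per table, 10 values per SQL) that yields ceil(99/10) * ceil(99/10) = 100 batches. A tiny, hypothetical sketch of that arithmetic (the helper below is illustrative only):

// Hypothetical helper: number of SQL batches split() will emit for the given sizes.
public class SplitBatchCountSketch {
    static int batches(int numOfTables, int numOfTablesPerSQL, int numOfRowsPerTable, int numOfValuesPerSQL) {
        int tableGroups = (numOfTables + numOfTablesPerSQL - 1) / numOfTablesPerSQL; // ceiling division
        int rowGroups = (numOfRowsPerTable + numOfValuesPerSQL - 1) / numOfValuesPerSQL;
        return tableGroups * rowGroups;
    }

    public static void main(String[] args) {
        System.out.println(batches(99, 10, 99, 10)); // 100
    }
}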
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SuperTableMetaGenerator.java
0 → 100644
package com.taosdata.taosdemo.service.data;

import com.taosdata.taosdemo.domain.FieldMeta;
import com.taosdata.taosdemo.domain.SuperTableMeta;
import com.taosdata.taosdemo.domain.TagMeta;
import com.taosdata.taosdemo.utils.TaosConstants;

import java.util.ArrayList;
import java.util.List;

public class SuperTableMetaGenerator {

    // build the super table metadata from the specified SQL statement
    public static SuperTableMeta generate(String superTableSQL) {
        SuperTableMeta tableMeta = new SuperTableMeta();
        // for example : create table superTable (ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int)
        superTableSQL = superTableSQL.trim().toLowerCase();
        if (!superTableSQL.startsWith("create"))
            throw new RuntimeException("invalid create super table SQL");

        if (superTableSQL.contains("tags")) {
            String tagSQL = superTableSQL.substring(superTableSQL.indexOf("tags") + 4).trim();
            tagSQL = tagSQL.substring(tagSQL.indexOf("(") + 1, tagSQL.lastIndexOf(")"));
            String[] tagPairs = tagSQL.split(",");
            List<TagMeta> tagMetaList = new ArrayList<>();
            for (String tagPair : tagPairs) {
                String name = tagPair.trim().split("\\s+")[0];
                String type = tagPair.trim().split("\\s+")[1];
                tagMetaList.add(new TagMeta(name, type));
            }
            tableMeta.setTags(tagMetaList);
            superTableSQL = superTableSQL.substring(0, superTableSQL.indexOf("tags"));
        }
        if (superTableSQL.contains("(")) {
            String fieldSQL = superTableSQL.substring(superTableSQL.indexOf("(") + 1, superTableSQL.indexOf(")"));
            String[] fieldPairs = fieldSQL.split(",");
            List<FieldMeta> fieldList = new ArrayList<>();
            for (String fieldPair : fieldPairs) {
                String name = fieldPair.trim().split("\\s+")[0];
                String type = fieldPair.trim().split("\\s+")[1];
                fieldList.add(new FieldMeta(name, type));
            }
            tableMeta.setFields(fieldList);
            superTableSQL = superTableSQL.substring(0, superTableSQL.indexOf("("));
        }
        superTableSQL = superTableSQL.substring(superTableSQL.indexOf("table") + 5).trim();
        if (superTableSQL.contains(".")) {
            String database = superTableSQL.split("\\.")[0];
            tableMeta.setDatabase(database);
            superTableSQL = superTableSQL.substring(superTableSQL.indexOf(".") + 1);
        }
        tableMeta.setName(superTableSQL.trim());

        return tableMeta;
    }

    // build super table metadata, specifying the number of fields and tags
    public static SuperTableMeta generate(String database, String name, int fieldSize, String fieldPrefix, int tagSize, String tagPrefix) {
        if (fieldSize < 2 || tagSize < 1) {
            throw new RuntimeException("create super table but fieldSize less than 2 or tagSize less than 1");
        }
        SuperTableMeta tableMetadata = new SuperTableMeta();
        tableMetadata.setDatabase(database);
        tableMetadata.setName(name);
        // fields
        List<FieldMeta> fields = new ArrayList<>();
        fields.add(new FieldMeta("ts", "timestamp"));
        for (int i = 1; i <= fieldSize; i++) {
            fields.add(new FieldMeta(fieldPrefix + "" + i, TaosConstants.DATA_TYPES[i % TaosConstants.DATA_TYPES.length]));
        }
        tableMetadata.setFields(fields);
        // tags
        List<TagMeta> tags = new ArrayList<>();
        for (int i = 1; i <= tagSize; i++) {
            tags.add(new TagMeta(tagPrefix + "" + i, TaosConstants.DATA_TYPES[i % TaosConstants.DATA_TYPES.length]));
        }
        tableMetadata.setTags(tags);
        return tableMetadata;
    }
}
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/TagValueGenerator.java
0 → 100644
package com.taosdata.taosdemo.service.data;

import com.taosdata.taosdemo.domain.TagMeta;
import com.taosdata.taosdemo.domain.TagValue;
import com.taosdata.taosdemo.utils.DataGenerator;

import java.util.ArrayList;
import java.util.List;

public class TagValueGenerator {

    // generate random tag values from the tagMetas
    public static List<TagValue> generate(List<TagMeta> tagMetas) {
        List<TagValue> tagValues = new ArrayList<>();
        for (int i = 0; i < tagMetas.size(); i++) {
            TagMeta tagMeta = tagMetas.get(i);
            TagValue tagValue = new TagValue();
            tagValue.setName(tagMeta.getName());
            tagValue.setValue(DataGenerator.randomValue(tagMeta.getType()));
            tagValues.add(tagValue);
        }
        return tagValues;
    }
}
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/DataGenerator.java
0 → 100644
package com.taosdata.taosdemo.utils;

import java.util.Random;

public class DataGenerator {
    private static Random random = new Random(System.currentTimeMillis());
    private static final String alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890";

    // "timestamp", "int", "bigint", "float", "double", "binary(64)", "smallint", "tinyint", "bool", "nchar(64)",
    public static Object randomValue(String type) {
        int length = 64;
        if (type.contains("(")) {
            length = Integer.parseInt(type.substring(type.indexOf("(") + 1, type.indexOf(")")));
            type = type.substring(0, type.indexOf("("));
        }
        switch (type.trim().toLowerCase()) {
            case "timestamp":
                return randomTimestamp();
            case "int":
                return randomInt();
            case "bigint":
                return randomBigint();
            case "float":
                return randomFloat();
            case "double":
                return randomDouble();
            case "binary":
                return randomBinary(length);
            case "smallint":
                return randomSmallint();
            case "tinyint":
                return randomTinyint();
            case "bool":
                return randomBoolean();
            case "nchar":
                return randomNchar(length);
            default:
                throw new IllegalArgumentException("Unexpected value: " + type);
        }
    }

    public static Long randomTimestamp() {
        long start = System.currentTimeMillis();
        return randomTimestamp(start, start + 60l * 60l * 1000l);
    }

    public static Long randomTimestamp(Long start, Long end) {
        return start + (long) random.nextInt((int) (end - start));
    }

    public static String randomNchar(int length) {
        return randomChinese(length);
    }

    public static Boolean randomBoolean() {
        return random.nextBoolean();
    }

    public static Integer randomTinyint() {
        return randomInt(-127, 127);
    }

    public static Integer randomSmallint() {
        return randomInt(-32767, 32767);
    }

    public static String randomBinary(int length) {
        return randomString(length);
    }

    public static String randomString(int length) {
        String zh_en = "";
        for (int i = 0; i < length; i++) {
            zh_en += alphabet.charAt(random.nextInt(alphabet.length()));
        }
        return zh_en;
    }

    public static String randomChinese(int length) {
        String zh_cn = "";
        int bottom = Integer.parseInt("4e00", 16);
        int top = Integer.parseInt("9fa5", 16);

        for (int i = 0; i < length; i++) {
            char c = (char) (random.nextInt(top - bottom + 1) + bottom);
            zh_cn += new String(new char[]{c});
        }
        return zh_cn;
    }

    public static Double randomDouble() {
        return randomDouble(0, 100);
    }

    public static Double randomDouble(double bottom, double top) {
        return bottom + (top - bottom) * random.nextDouble();
    }

    public static Float randomFloat() {
        return randomFloat(0, 100);
    }

    public static Float randomFloat(float bottom, float top) {
        return bottom + (top - bottom) * random.nextFloat();
    }

    public static Long randomBigint() {
        return random.nextLong();
    }

    public static Integer randomInt(int bottom, int top) {
        return bottom + random.nextInt((top - bottom));
    }

    public static Integer randomInt() {
        return randomInt(0, 100);
    }
}
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/JdbcTaosdemoConfig.java
0 → 100644
package com.taosdata.taosdemo.utils;

public final class JdbcTaosdemoConfig {
    // instance
    public String host;                     //host
    public int port = 6030;                 //port
    public String user = "root";            //user
    public String password = "taosdata";    //password
    // database
    public String database = "test";        //database
    public int keep = 3650;                 //keep
    public int days = 30;                   //days
    public int replica = 1;                 //replica
    //super table
    public boolean doCreateTable = true;
    public String superTable = "weather";   //super table name
    public String prefixOfFields = "col";
    public int numOfFields;
    public String prefixOfTags = "tag";
    public int numOfTags;
    public String superTableSQL;
    //sub table
    public String tablePrefix = "t";
    public int numOfTables = 100;
    public int numOfThreadsForCreate = 1;
    // insert task
    public boolean autoCreateTable;
    public int numOfRowsPerTable = 100;
    public int numOfThreadsForInsert = 1;
    public int numOfTablesPerSQL = 10;
    public int numOfValuesPerSQL = 10;
    public long startTime;
    public long timeGap;
    public int sleep = 0;
    public int order = 0;
    public int rate = 10;
    public long range = 1000l;
    // select task
    // drop task
    public boolean dropTable = false;

    public static void printHelp() {
        System.out.println("Usage: java -jar jdbc-taosdemo-2.0.jar [OPTION...]");
        // instance
        System.out.println("-host The host to connect to TDengine which you must specify");
        System.out.println("-port The TCP/IP port number to use for the connection. Default is 6030");
        System.out.println("-user The TDengine user name to use when connecting to the server. Default is 'root'");
        System.out.println("-password The password to use when connecting to the server.Default is 'taosdata'");
        // database
        System.out.println("-database Destination database. Default is 'test'");
        System.out.println("-keep database keep parameter. Default is 3650");
        System.out.println("-days database days parameter. Default is 30");
        System.out.println("-replica database replica parameter. Default 1, min: 1, max: 3");
        // super table
        System.out.println("-doCreateTable do create super table and sub table, true or false, Default true");
        System.out.println("-superTable super table name. Default 'weather'");
        System.out.println("-prefixOfFields The prefix of field in super table. Default is 'col'");
        System.out.println("-numOfFields The number of field in super table. Default is (ts timestamp, temperature float, humidity int).");
        System.out.println("-prefixOfTags The prefix of tag in super table. Default is 'tag'");
        System.out.println("-numOfTags The number of tag in super table. Default is (location nchar(64), groupId int).");
        System.out.println("-superTableSQL specify a sql statement for the super table.\n" +
                " Default is 'create table weather(ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int). \n" +
                " if you use this parameter, the numOfFields and numOfTags will be invalid'");
        // sub table
        System.out.println("-tablePrefix The prefix of sub tables. Default is 't'");
        System.out.println("-numOfTables The number of tables. Default is 1");
        System.out.println("-numOfThreadsForCreate The number of thread during create sub table. Default is 1");
        // insert task
        System.out.println("-autoCreateTable Use auto Create sub tables SQL. Default is false");
        System.out.println("-numOfRowsPerTable The number of records per table. Default is 1");
        System.out.println("-numOfThreadsForInsert The number of threads during insert row. Default is 1");
        System.out.println("-numOfTablesPerSQL The number of table per SQL. Default is 1");
        System.out.println("-numOfValuesPerSQL The number of value per SQL. Default is 1");
        System.out.println("-startTime start time for insert task, The format is \"yyyy-MM-dd HH:mm:ss.SSS\".");
        System.out.println("-timeGap the number of time gap. Default is 1000 ms");
        System.out.println("-sleep The number of milliseconds for sleep after each insert. default is 0");
        System.out.println("-order Insert mode--0: In order, 1: Out of order. Default is in order");
        System.out.println("-rate The proportion of data out of order. effective only if order is 1. min 0, max 100, default is 10");
        System.out.println("-range The range of data out of order. effective only if order is 1. default is 1000 ms");
        // query task
        // System.out.println("-sqlFile The select sql file");
        // drop task
        System.out.println("-dropTable Drop data before quit. Default is false");
        System.out.println("--help Give this help list");
    }

    /**
     * parse args from command line
     *
     * @param args command line args
     * @return JdbcTaosdemoConfig
     */
    public JdbcTaosdemoConfig(String[] args) {
        for (int i = 0; i < args.length; i++) {
            // instance
            if ("-host".equals(args[i]) && i < args.length - 1) {
                host = args[++i];
            }
            if ("-port".equals(args[i]) && i < args.length - 1) {
                port = Integer.parseInt(args[++i]);
            }
            if ("-user".equals(args[i]) && i < args.length - 1) {
                user = args[++i];
            }
            if ("-password".equals(args[i]) && i < args.length - 1) {
                password = args[++i];
            }
            // database
            if ("-database".equals(args[i]) && i < args.length - 1) {
                database = args[++i];
            }
            if ("-keep".equals(args[i]) && i < args.length - 1) {
                keep = Integer.parseInt(args[++i]);
            }
            if ("-days".equals(args[i]) && i < args.length - 1) {
                days = Integer.parseInt(args[++i]);
            }
            if ("-replica".equals(args[i]) && i < args.length - 1) {
                replica = Integer.parseInt(args[++i]);
            }
            // super table
            if ("-doCreateTable".equals(args[i]) && i < args.length - 1) {
                doCreateTable = Boolean.parseBoolean(args[++i]);
            }
            if ("-superTable".equals(args[i]) && i < args.length - 1) {
                superTable = args[++i];
            }
            if ("-prefixOfFields".equals(args[i]) && i < args.length - 1) {
                prefixOfFields = args[++i];
            }
            if ("-numOfFields".equals(args[i]) && i < args.length - 1) {
                numOfFields = Integer.parseInt(args[++i]);
            }
            if ("-prefixOfTags".equals(args[i]) && i < args.length - 1) {
                prefixOfTags = args[++i];
            }
            if ("-numOfTags".equals(args[i]) && i < args.length - 1) {
                numOfTags = Integer.parseInt(args[++i]);
            }
            if ("-superTableSQL".equals(args[i]) && i < args.length - 1) {
                superTableSQL = args[++i];
            }
            // sub table
            if ("-tablePrefix".equals(args[i]) && i < args.length - 1) {
                tablePrefix = args[++i];
            }
            if ("-numOfTables".equals(args[i]) && i < args.length - 1) {
                numOfTables = Integer.parseInt(args[++i]);
            }
            if ("-autoCreateTable".equals(args[i]) && i < args.length - 1) {
                autoCreateTable = Boolean.parseBoolean(args[++i]);
            }
            if ("-numOfThreadsForCreate".equals(args[i]) && i < args.length - 1) {
                numOfThreadsForCreate = Integer.parseInt(args[++i]);
            }
            // insert task
            if ("-numOfRowsPerTable".equals(args[i]) && i < args.length - 1) {
                numOfRowsPerTable = Integer.parseInt(args[++i]);
            }
            if ("-numOfThreadsForInsert".equals(args[i]) && i < args.length - 1) {
                numOfThreadsForInsert = Integer.parseInt(args[++i]);
            }
            if ("-numOfTablesPerSQL".equals(args[i]) && i < args.length - 1) {
                numOfTablesPerSQL = Integer.parseInt(args[++i]);
            }
            if ("-numOfValuesPerSQL".equals(args[i]) && i < args.length - 1) {
                numOfValuesPerSQL = Integer.parseInt(args[++i]);
            }
            if ("-startTime".equals(args[i]) && i < args.length - 1) {
                startTime = TimeStampUtil.datetimeToLong(args[++i]);
            }
            if ("-timeGap".equals(args[i]) && i < args.length - 1) {
                timeGap = Long.parseLong(args[++i]);
            }
            if ("-sleep".equals(args[i]) && i < args.length - 1) {
                sleep = Integer.parseInt(args[++i]);
            }
            if ("-order".equals(args[i]) && i < args.length - 1) {
                order = Integer.parseInt(args[++i]);
            }
            if ("-rate".equals(args[i]) && i < args.length - 1) {
                rate = Integer.parseInt(args[++i]);
                if (rate < 0 || rate > 100)
                    throw new IllegalArgumentException("rate must between 0 and 100");
            }
            if ("-range".equals(args[i]) && i < args.length - 1) {
                range = Integer.parseInt(args[++i]);
            }
            // select task
            // drop task
            if ("-dropTable".equals(args[i]) && i < args.length - 1) {
                dropTable = Boolean.parseBoolean(args[++i]);
            }
        }
    }

    public static void main(String[] args) {
        JdbcTaosdemoConfig config = new JdbcTaosdemoConfig(args);
    }
}
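Editor's note, not part of the commit: every flag is parsed as a "-name value" pair, and any flag that is omitted keeps the default declared in the fields above. A minimal, hypothetical sketch exercising the parser directly (class name and argument values are illustrative only):

import com.taosdata.taosdemo.utils.JdbcTaosdemoConfig;

// Hypothetical check of the argument parsing; fields are public, so they can be read directly.
public class ConfigParsingSketch {
    public static void main(String[] args) {
        String[] demoArgs = {
                "-host", "127.0.0.1",
                "-database", "test",
                "-numOfTables", "1000",
                "-numOfRowsPerTable", "10000",
                "-order", "1", "-rate", "20", "-range", "500"
        };
        JdbcTaosdemoConfig config = new JdbcTaosdemoConfig(demoArgs);
        // flags not given above keep their defaults, e.g. port stays 6030 and user stays "root"
        System.out.println(config.host + " " + config.database + " " + config.numOfTables + " "
                + config.numOfRowsPerTable + " " + config.order + " " + config.rate + " " + config.range);
    }
}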
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/TaosConstants.java
0 → 100644
package com.taosdata.taosdemo.utils;

public class TaosConstants {
    public static final String[] DATA_TYPES = {
            "timestamp", "int", "bigint", "float", "double",
            "binary(64)", "smallint", "tinyint", "bool", "nchar(64)",
    };
}
tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/TimeStampUtil.java
0 → 100644
package com.taosdata.taosdemo.utils;

import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;

public class TimeStampUtil {

    private static final String datetimeFormat = "yyyy-MM-dd HH:mm:ss.SSS";

    public static long datetimeToLong(String dateTime) {
        SimpleDateFormat sdf = new SimpleDateFormat(datetimeFormat);
        try {
            return sdf.parse(dateTime).getTime();
        } catch (ParseException e) {
            throw new IllegalArgumentException("invalid datetime string >>> " + dateTime);
        }
    }

    public static String longToDatetime(long time) {
        SimpleDateFormat sdf = new SimpleDateFormat(datetimeFormat);
        return sdf.format(new Date(time));
    }

    public static class TimeTuple {
        public Long start;
        public Long end;
        public Long timeGap;

        TimeTuple(long start, long end, long timeGap) {
            this.start = start;
            this.end = end;
            this.timeGap = timeGap;
        }
    }

    public static TimeTuple range(long start, long timeGap, long size) {
        long now = System.currentTimeMillis();
        if (timeGap < 1)
            timeGap = 1;
        if (start == 0)
            start = now - size * timeGap;

        // throw an exception if size is less than 1
        if (size < 1)
            throw new IllegalArgumentException("size less than 1.");
        // if even a 1 ms timeGap would overrun "now", move start backwards
        if (start + size > now) {
            start = now - size;
            return new TimeTuple(start, now, 1);
        }
        long end = start + (long) (timeGap * size);
        if (end > now) {
            // compress timeGap
            end = now;
            double gap = (end - start) / (size * 1.0f);
            if (gap < 1.0f) {
                timeGap = 1;
                start = end - size;
            } else {
                timeGap = (long) gap;
                end = start + (long) (timeGap * size);
            }
        }
        return new TimeTuple(start, end, timeGap);
    }
}
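Editor's note, not part of the commit: range() anchors the requested series against the current time; when start is 0 it derives one, and when start plus timeGap times size would run past "now" it shrinks timeGap (down to 1 ms) so the generated timestamps never lie in the future. A minimal, hypothetical usage sketch (class name is illustrative only):

import com.taosdata.taosdemo.utils.TimeStampUtil;

// Hypothetical example: ask for 1000 timestamps, 1000 ms apart, ending around "now".
public class TimeStampRangeSketch {
    public static void main(String[] args) {
        // start = 0 lets range() pick the window itself
        TimeStampUtil.TimeTuple tuple = TimeStampUtil.range(0, 1000, 1000);
        System.out.println(TimeStampUtil.longToDatetime(tuple.start)
                + " -> " + TimeStampUtil.longToDatetime(tuple.end)
                + ", gap " + tuple.timeGap + " ms");
    }
}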
tests/examples/JDBC/taosdemo/src/main/resources/application.properties
0 → 100644
#spring.datasource.url=jdbc:mysql://master:3306/?useSSL=false&useUnicode=true&characterEncoding=UTF-8
#spring.datasource.driver-class-name=com.mysql.jdbc.Driver
#spring.datasource.username=root
#spring.datasource.password=123456
spring.datasource.url=jdbc:TAOS://master:6030/?charset=UTF-8&locale=en_US.UTF-8&timezone=UTC-8
spring.datasource.driver-class-name=com.taosdata.jdbc.TSDBDriver
spring.datasource.username=root
spring.datasource.password=taosdata

spring.datasource.hikari.maximum-pool-size=10
spring.datasource.hikari.minimum-idle=10
spring.datasource.hikari.max-lifetime=600000

logging.level.com.taosdata.taosdemo.mapper=debug
\ No newline at end of file
tests/examples/JDBC/taosdemo/src/main/resources/log4j.properties
0 → 100644
### root logger ###
log4j.rootLogger=debug,stdout,DebugLog,ErrorLog
### print messages to the console ###
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.Target=System.out
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} method:%l%n%m%n
### write logs at DEBUG level and above to logs/debug.log
log4j.appender.DebugLog=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DebugLog.File=logs/debug.log
log4j.appender.DebugLog.Append=true
log4j.appender.DebugLog.Threshold=DEBUG
log4j.appender.DebugLog.layout=org.apache.log4j.PatternLayout
log4j.appender.DebugLog.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n
### write logs at ERROR level and above to logs/error.log
log4j.appender.ErrorLog=org.apache.log4j.DailyRollingFileAppender
log4j.appender.ErrorLog.File=logs/error.log
log4j.appender.ErrorLog.Append=true
log4j.appender.ErrorLog.Threshold=ERROR
log4j.appender.ErrorLog.layout=org.apache.log4j.PatternLayout
log4j.appender.ErrorLog.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n
\ No newline at end of file
tests/examples/JDBC/taosdemo/src/main/resources/templates/index.html
0 → 100644
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Index</title>
</head>
<body>
<h1>Hello~~~</h1>
</body>
</html>
\ No newline at end of file
tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/TaosdemoApplicationTests.java
0 → 100644
package com.taosdata.taosdemo;

import org.junit.jupiter.api.Test;
import org.springframework.boot.test.context.SpringBootTest;

@SpringBootTest
class TaosdemoApplicationTests {

    @Test
    void contextLoads() {
    }
}
tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/DatabaseMapperTest.java
0 → 100644
package com.taosdata.taosdemo.mapper;

import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;

import java.util.HashMap;
import java.util.Map;

@RunWith(SpringRunner.class)
@SpringBootTest
public class DatabaseMapperTest {

    @Autowired
    private DatabaseMapper databaseMapper;

    @Test
    public void createDatabase() {
        databaseMapper.createDatabase("db_test");
    }

    @Test
    public void dropDatabase() {
        databaseMapper.dropDatabase("db_test");
    }

    @Test
    public void creatDatabaseWithParameters() {
        Map<String, String> map = new HashMap<>();
        map.put("dbname", "weather");
        map.put("keep", "3650");
        map.put("days", "30");
        map.put("replica", "1");
        databaseMapper.createDatabaseWithParameters(map);
    }

    @Test
    public void useDatabase() {
        databaseMapper.useDatabase("test");
    }
}
\ No newline at end of file
tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/SubTableMapperTest.java
0 → 100644
package com.taosdata.taosdemo.mapper;

import com.taosdata.taosdemo.domain.*;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;

import java.util.ArrayList;
import java.util.List;

@RunWith(SpringRunner.class)
@SpringBootTest
public class SubTableMapperTest {

    @Autowired
    private SubTableMapper subTableMapper;
    private List<SubTableValue> tables;

    @Test
    public void createUsingSuperTable() {
        SubTableMeta subTableMeta = new SubTableMeta();
        subTableMeta.setDatabase("test");
        subTableMeta.setSupertable("weather");
        subTableMeta.setName("t1");
        List<TagValue> tags = new ArrayList<>();
        for (int i = 0; i < 3; i++) {
            tags.add(new TagValue("tag" + (i + 1), "nchar(64)"));
        }
        subTableMeta.setTags(tags);
        subTableMapper.createUsingSuperTable(subTableMeta);
    }

    @Test
    public void insertOneTableMultiValues() {
        subTableMapper.insertOneTableMultiValues(tables.get(0));
    }

    @Test
    public void insertOneTableMultiValuesUsingSuperTable() {
        subTableMapper.insertOneTableMultiValuesUsingSuperTable(tables.get(0));
    }

    @Test
    public void insertMultiTableMultiValues() {
        subTableMapper.insertMultiTableMultiValues(tables);
    }

    @Test
    public void insertMultiTableMultiValuesUsingSuperTable() {
        subTableMapper.insertMultiTableMultiValuesUsingSuperTable(tables);
    }

    @Before
    public void before() {
        tables = new ArrayList<>();
        for (int ind = 0; ind < 3; ind++) {
            SubTableValue table = new SubTableValue();
            table.setDatabase("test");
            // supertable
            table.setSupertable("weather");
            table.setName("t" + (ind + 1));
            // tags
            List<TagValue> tags = new ArrayList<>();
            for (int i = 0; i < 3; i++) {
                tags.add(new TagValue("tag" + (i + 1), "beijing"));
            }
            table.setTags(tags);
            // values
            List<RowValue> values = new ArrayList<>();
            for (int i = 0; i < 2; i++) {
                List<FieldValue> fields = new ArrayList<>();
                for (int j = 0; j < 4; j++) {
                    fields.add(new FieldValue("f" + (j + 1), (j + 1) * 10));
                }
                values.add(new RowValue(fields));
            }
            table.setValues(values);
            tables.add(table);
        }
    }
}
\ No newline at end of file
tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/SuperTableMapperTest.java
0 → 100644
package com.taosdata.taosdemo.mapper;

import com.taosdata.taosdemo.domain.FieldMeta;
import com.taosdata.taosdemo.domain.SuperTableMeta;
import com.taosdata.taosdemo.domain.TagMeta;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;

import java.util.ArrayList;
import java.util.List;

@RunWith(SpringRunner.class)
@SpringBootTest
public class SuperTableMapperTest {

    @Autowired
    private SuperTableMapper superTableMapper;

    @Test
    public void testCreateSuperTableUsingSQL() {
        String sql = "create table test.weather (ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int)";
        superTableMapper.createSuperTableUsingSQL(sql);
    }

    @Test
    public void createSuperTable() {
        SuperTableMeta superTableMeta = new SuperTableMeta();
        superTableMeta.setDatabase("test");
        superTableMeta.setName("weather");
        List<FieldMeta> fields = new ArrayList<>();
        for (int i = 0; i < 5; i++) {
            fields.add(new FieldMeta("f" + (i + 1), "int"));
        }
        superTableMeta.setFields(fields);
        List<TagMeta> tags = new ArrayList<>();
        for (int i = 0; i < 3; i++) {
            tags.add(new TagMeta("t" + (i + 1), "nchar(64)"));
        }
        superTableMeta.setTags(tags);

        superTableMapper.createSuperTable(superTableMeta);
    }

    @Test
    public void dropSuperTable() {
        superTableMapper.dropSuperTable("test", "weather");
    }
}
\ No newline at end of file
tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/TableMapperTest.java
0 → 100644
浏览文件 @
cf4f0d95
package com.taosdata.taosdemo.mapper;

import com.taosdata.taosdemo.domain.*;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;

import java.util.ArrayList;
import java.util.List;
import java.util.Random;

@SpringBootTest
@RunWith(SpringRunner.class)
public class TableMapperTest {
    @Autowired
    private TableMapper tableMapper;
    private static Random random = new Random(System.currentTimeMillis());

    @Test
    public void create() {
        TableMeta table = new TableMeta();
        table.setDatabase("test");
        table.setName("t1");
        List<FieldMeta> fields = new ArrayList<>();
        for (int i = 0; i < 3; i++) {
            FieldMeta field = new FieldMeta();
            field.setName("f" + (i + 1));
            field.setType("nchar(64)");
            fields.add(field);
        }
        table.setFields(fields);
        tableMapper.create(table);
    }

    @Test
    public void insertOneTableMultiValues() {
        TableValue table = new TableValue();
        table.setDatabase("test");
        table.setName("t1");
        List<RowValue> values = new ArrayList<>();
        for (int j = 0; j < 5; j++) {
            List<FieldValue> fields = new ArrayList<>();
            for (int k = 0; k < 2; k++) {
                FieldValue field = new FieldValue<>();
                field.setValue((k + 1) * 100);
                fields.add(field);
            }
            values.add(new RowValue(fields));
        }
        table.setValues(values);
        tableMapper.insertOneTableMultiValues(table);
    }

    @Test
    public void insertOneTableMultiValuesWithCoulmns() {
        TableValue tableValue = new TableValue();
        tableValue.setDatabase("test");
        tableValue.setName("weather");
        // columns
        List<FieldMeta> columns = new ArrayList<>();
        for (int i = 0; i < 3; i++) {
            FieldMeta field = new FieldMeta();
            field.setName("f" + (i + 1));
            columns.add(field);
        }
        tableValue.setColumns(columns);
        // values
        List<RowValue> values = new ArrayList<>();
        for (int i = 0; i < 3; i++) {
            List<FieldValue> fields = new ArrayList<>();
            for (int j = 0; j < 3; j++) {
                FieldValue field = new FieldValue();
                field.setValue(j);
                fields.add(field);
            }
            values.add(new RowValue(fields));
        }
        tableValue.setValues(values);
        tableMapper.insertOneTableMultiValuesWithColumns(tableValue);
    }

    @Test
    public void insertMultiTableMultiValues() {
        List<TableValue> tables = new ArrayList<>();
        for (int i = 0; i < 3; i++) {
            TableValue table = new TableValue();
            table.setDatabase("test");
            table.setName("t" + (i + 1));
            List<RowValue> values = new ArrayList<>();
            for (int j = 0; j < 5; j++) {
                List<FieldValue> fields = new ArrayList<>();
                for (int k = 0; k < 2; k++) {
                    FieldValue field = new FieldValue<>();
                    field.setValue((k + 1) * 10);
                    fields.add(field);
                }
                values.add(new RowValue(fields));
            }
            table.setValues(values);
            tables.add(table);
        }
        tableMapper.insertMultiTableMultiValues(tables);
    }

    @Test
    public void insertMultiTableMultiValuesWithCoulumns() {
        List<TableValue> tables = new ArrayList<>();
        for (int i = 0; i < 3; i++) {
            TableValue table = new TableValue();
            table.setDatabase("test");
            table.setName("t" + (i + 1));
            // columns
            List<FieldMeta> columns = new ArrayList<>();
            for (int j = 0; j < 3; j++) {
                FieldMeta field = new FieldMeta();
                field.setName("f" + (j + 1));
                columns.add(field);
            }
            table.setColumns(columns);
            // values
            List<RowValue> values = new ArrayList<>();
            for (int j = 0; j < 5; j++) {
                List<FieldValue> fields = new ArrayList<>();
                for (int k = 0; k < columns.size(); k++) {
                    FieldValue field = new FieldValue<>();
                    field.setValue((k + 1) * 10);
                    fields.add(field);
                }
                values.add(new RowValue(fields));
            }
            table.setValues(values);
            tables.add(table);
        }
        tableMapper.insertMultiTableMultiValuesWithColumns(tables);
    }
}
\ No newline at end of file
tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/DatabaseServiceTest.java
0 → 100644
package com.taosdata.taosdemo.service;

import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;

@RunWith(SpringRunner.class)
@SpringBootTest
public class DatabaseServiceTest {
    @Autowired
    private DatabaseService service;

    @Test
    public void testCreateDatabase1() {
        service.createDatabase("testXXXX");
    }

    @Test
    public void dropDatabase() {
        service.dropDatabase("testXXXX");
    }

    @Test
    public void useDatabase() {
        service.useDatabase("test");
    }
}
\ No newline at end of file
tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/SubTableServiceTest.java
0 → 100644
package com.taosdata.taosdemo.service;

import com.taosdata.taosdemo.domain.SubTableMeta;
import com.taosdata.taosdemo.domain.TagValue;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;

import java.util.ArrayList;
import java.util.List;

@RunWith(SpringRunner.class)
@SpringBootTest
public class SubTableServiceTest {
    @Autowired
    private SubTableService service;

    private List<SubTableMeta> subTables;

    @Before
    public void before() {
        subTables = new ArrayList<>();
        for (int i = 1; i <= 1; i++) {
            SubTableMeta subTableMeta = new SubTableMeta();
            subTableMeta.setDatabase("test");
            subTableMeta.setSupertable("weather");
            subTableMeta.setName("t" + i);
            List<TagValue> tags = new ArrayList<>();
            tags.add(new TagValue("location", "beijing"));
            tags.add(new TagValue("groupId", i));
            subTableMeta.setTags(tags);
            subTables.add(subTableMeta);
        }
    }

    @Test
    public void testCreateSubTable() {
        int count = service.createSubTable(subTables);
        System.out.println("count >>> " + count);
    }

    @Test
    public void testCreateSubTableList() {
        int count = service.createSubTable(subTables, 10);
        System.out.println("count >>> " + count);
    }
}
\ No newline at end of file
tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/SuperTableServiceTest.java
0 → 100644
package com.taosdata.taosdemo.service;

import com.taosdata.taosdemo.domain.FieldMeta;
import com.taosdata.taosdemo.domain.SuperTableMeta;
import com.taosdata.taosdemo.domain.TagMeta;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;

import java.util.ArrayList;
import java.util.List;

@RunWith(SpringRunner.class)
@SpringBootTest
public class SuperTableServiceTest {
    @Autowired
    private SuperTableService service;

    @Test
    public void testCreate() {
        SuperTableMeta superTableMeta = new SuperTableMeta();
        superTableMeta.setDatabase("test");
        superTableMeta.setName("weather");
        List<FieldMeta> fields = new ArrayList<>();
        fields.add(new FieldMeta("ts", "timestamp"));
        fields.add(new FieldMeta("temperature", "float"));
        fields.add(new FieldMeta("humidity", "int"));
        superTableMeta.setFields(fields);
        List<TagMeta> tags = new ArrayList<>();
        tags.add(new TagMeta("location", "nchar(64)"));
        tags.add(new TagMeta("groupId", "int"));
        superTableMeta.setTags(tags);
        service.create(superTableMeta);
    }
}
\ No newline at end of file
tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/TableServiceTest.java
0 → 100644
package com.taosdata.taosdemo.service;

import com.taosdata.taosdemo.domain.TableMeta;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;

import java.util.ArrayList;
import java.util.List;

@RunWith(SpringRunner.class)
@SpringBootTest
public class TableServiceTest {
    @Autowired
    private TableService tableService;

    private List<TableMeta> tables;

    @Before
    public void before() {
        tables = new ArrayList<>();
        for (int i = 0; i < 1; i++) {
            TableMeta tableMeta = new TableMeta();
            tableMeta.setDatabase("test");
            tableMeta.setName("weather" + (i + 1));
            tables.add(tableMeta);
        }
    }

    @Test
    public void testCreate() {
        int count = tableService.create(tables);
        System.out.println(count);
    }

    @Test
    public void testCreateMultiThreads() {
        System.out.println(tableService.create(tables, 10));
    }
}
\ No newline at end of file
tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/data/FieldValueGeneratorTest.java
0 → 100644
package com.taosdata.taosdemo.service.data;

import com.taosdata.taosdemo.domain.FieldMeta;
import com.taosdata.taosdemo.domain.RowValue;
import com.taosdata.taosdemo.utils.TimeStampUtil;
import org.junit.After;
import org.junit.Assert;
import org.junit.Test;

import java.util.ArrayList;
import java.util.List;

public class FieldValueGeneratorTest {
    private List<RowValue> rowValues;

    @Test
    public void generate() {
        List<FieldMeta> fieldMetas = new ArrayList<>();
        fieldMetas.add(new FieldMeta("ts", "timestamp"));
        fieldMetas.add(new FieldMeta("temperature", "float"));
        fieldMetas.add(new FieldMeta("humidity", "int"));

        long start = TimeStampUtil.datetimeToLong("2020-01-01 00:00:00.000");
        long end = TimeStampUtil.datetimeToLong("2020-01-01 10:00:00.000");

        rowValues = FieldValueGenerator.generate(start, end, 1000l * 3600, fieldMetas);
        Assert.assertEquals(10, rowValues.size());
    }

    @Test
    public void disrupt() {
        List<FieldMeta> fieldMetas = new ArrayList<>();
        fieldMetas.add(new FieldMeta("ts", "timestamp"));
        fieldMetas.add(new FieldMeta("temperature", "float"));
        fieldMetas.add(new FieldMeta("humidity", "int"));

        long start = TimeStampUtil.datetimeToLong("2020-01-01 00:00:00.000");
        long end = TimeStampUtil.datetimeToLong("2020-01-01 10:00:00.000");
        rowValues = FieldValueGenerator.generate(start, end, 1000l * 3600l, fieldMetas);

        FieldValueGenerator.disrupt(rowValues, 20, 1000);
        Assert.assertEquals(10, rowValues.size());
    }

    @After
    public void after() {
        for (RowValue row : rowValues) {
            row.getFields().stream().forEach(field -> {
                if (field.getName().equals("ts")) {
                    System.out.print(TimeStampUtil.longToDatetime((Long) field.getValue()));
                } else
                    System.out.print(" ," + field.getValue());
            });
            System.out.println();
        }
    }
}
\ No newline at end of file
tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/data/SubTableMetaGeneratorTest.java
0 → 100644
package com.taosdata.taosdemo.service.data;

import com.taosdata.taosdemo.domain.FieldMeta;
import com.taosdata.taosdemo.domain.SubTableMeta;
import com.taosdata.taosdemo.domain.SuperTableMeta;
import com.taosdata.taosdemo.domain.TagMeta;
import org.junit.After;
import org.junit.Assert;
import org.junit.Test;

import java.util.ArrayList;
import java.util.List;

public class SubTableMetaGeneratorTest {
    List<SubTableMeta> subTableMetas;

    @Test
    public void generate() {
        SuperTableMeta superTableMeta = new SuperTableMeta();
        superTableMeta.setDatabase("test");
        superTableMeta.setName("weather");
        List<FieldMeta> fields = new ArrayList<>();
        fields.add(new FieldMeta("ts", "timestamp"));
        fields.add(new FieldMeta("temperature", "float"));
        fields.add(new FieldMeta("humidity", "int"));
        superTableMeta.setFields(fields);
        List<TagMeta> tags = new ArrayList<>();
        tags.add(new TagMeta("location", "nchar(64)"));
        tags.add(new TagMeta("groupId", "int"));
        superTableMeta.setTags(tags);

        subTableMetas = SubTableMetaGenerator.generate(superTableMeta, 10, "t");
        Assert.assertEquals(10, subTableMetas.size());
        Assert.assertEquals("t1", subTableMetas.get(0).getName());
        Assert.assertEquals("t2", subTableMetas.get(1).getName());
        Assert.assertEquals("t3", subTableMetas.get(2).getName());
        Assert.assertEquals("t4", subTableMetas.get(3).getName());
        Assert.assertEquals("t5", subTableMetas.get(4).getName());
        Assert.assertEquals("t6", subTableMetas.get(5).getName());
        Assert.assertEquals("t7", subTableMetas.get(6).getName());
        Assert.assertEquals("t8", subTableMetas.get(7).getName());
        Assert.assertEquals("t9", subTableMetas.get(8).getName());
        Assert.assertEquals("t10", subTableMetas.get(9).getName());
    }

    @After
    public void after() {
        for (SubTableMeta subTableMeta : subTableMetas) {
            System.out.println(subTableMeta);
        }
    }
}
\ No newline at end of file
tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/data/SuperTableMetaGeneratorImplTest.java
0 → 100644
package com.taosdata.taosdemo.service.data;

import com.taosdata.taosdemo.domain.FieldMeta;
import com.taosdata.taosdemo.domain.SuperTableMeta;
import com.taosdata.taosdemo.domain.TagMeta;
import org.junit.After;
import org.junit.Assert;
import org.junit.Test;

public class SuperTableMetaGeneratorImplTest {
    private SuperTableMeta meta;

    @Test
    public void generate() {
        String sql = "create table test.weather (ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int)";
        meta = SuperTableMetaGenerator.generate(sql);
        Assert.assertEquals("test", meta.getDatabase());
        Assert.assertEquals("weather", meta.getName());
        Assert.assertEquals(3, meta.getFields().size());
        Assert.assertEquals("ts", meta.getFields().get(0).getName());
        Assert.assertEquals("timestamp", meta.getFields().get(0).getType());
        Assert.assertEquals("temperature", meta.getFields().get(1).getName());
        Assert.assertEquals("float", meta.getFields().get(1).getType());
        Assert.assertEquals("humidity", meta.getFields().get(2).getName());
        Assert.assertEquals("int", meta.getFields().get(2).getType());
        Assert.assertEquals("location", meta.getTags().get(0).getName());
        Assert.assertEquals("nchar(64)", meta.getTags().get(0).getType());
        Assert.assertEquals("groupid", meta.getTags().get(1).getName());
        Assert.assertEquals("int", meta.getTags().get(1).getType());
    }

    @Test
    public void generate2() {
        meta = SuperTableMetaGenerator.generate("test", "weather", 10, "col", 10, "tag");
        Assert.assertEquals("test", meta.getDatabase());
        Assert.assertEquals("weather", meta.getName());
        Assert.assertEquals(11, meta.getFields().size());
        for (FieldMeta fieldMeta : meta.getFields()) {
            Assert.assertNotNull(fieldMeta.getName());
            Assert.assertNotNull(fieldMeta.getType());
        }
        for (TagMeta tagMeta : meta.getTags()) {
            Assert.assertNotNull(tagMeta.getName());
            Assert.assertNotNull(tagMeta.getType());
        }
    }

    @After
    public void after() {
        System.out.println(meta.getDatabase());
        System.out.println(meta.getName());
        for (FieldMeta fieldMeta : meta.getFields()) {
            System.out.println(fieldMeta);
        }
        for (TagMeta tagMeta : meta.getTags()) {
            System.out.println(tagMeta);
        }
    }
}
\ No newline at end of file
tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/data/TagValueGeneratorTest.java
0 → 100644
package com.taosdata.taosdemo.service.data;

import com.taosdata.taosdemo.domain.TagMeta;
import com.taosdata.taosdemo.domain.TagValue;
import org.junit.After;
import org.junit.Assert;
import org.junit.Test;

import java.util.ArrayList;
import java.util.List;

public class TagValueGeneratorTest {
    List<TagValue> tagvalues;

    @Test
    public void generate() {
        List<TagMeta> tagMetaList = new ArrayList<>();
        tagMetaList.add(new TagMeta("location", "nchar(10)"));
        tagMetaList.add(new TagMeta("groupId", "int"));
        tagMetaList.add(new TagMeta("ts", "timestamp"));
        tagMetaList.add(new TagMeta("temperature", "float"));
        tagMetaList.add(new TagMeta("humidity", "double"));
        tagMetaList.add(new TagMeta("text", "binary(10)"));

        tagvalues = TagValueGenerator.generate(tagMetaList);
        Assert.assertEquals("location", tagvalues.get(0).getName());
        Assert.assertEquals("groupId", tagvalues.get(1).getName());
        Assert.assertEquals("ts", tagvalues.get(2).getName());
        Assert.assertEquals("temperature", tagvalues.get(3).getName());
        Assert.assertEquals("humidity", tagvalues.get(4).getName());
        Assert.assertEquals("text", tagvalues.get(5).getName());
    }

    @After
    public void after() {
        tagvalues.stream().forEach(System.out::println);
    }
}
\ No newline at end of file
tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/utils/DataGeneratorTest.java
0 → 100644
package com.taosdata.taosdemo.utils;

import org.junit.Assert;
import org.junit.Test;

public class DataGeneratorTest {

    @Test
    public void randomValue() {
        for (int i = 0; i < TaosConstants.DATA_TYPES.length; i++) {
            System.out.println(TaosConstants.DATA_TYPES[i] + " >>> " + DataGenerator.randomValue(TaosConstants.DATA_TYPES[i]));
        }
    }

    @Test
    public void randomNchar() {
        String s = DataGenerator.randomNchar(10);
        Assert.assertEquals(10, s.length());
    }
}
\ No newline at end of file
tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/utils/TimeStampUtilTest.java
0 → 100644
package com.taosdata.taosdemo.utils;

import org.junit.Test;

import static org.junit.Assert.assertEquals;

public class TimeStampUtilTest {

    @Test
    public void datetimeToLong() {
        final String startTime = "2005-01-01 00:00:00.000";
        long start = TimeStampUtil.datetimeToLong(startTime);
        assertEquals(1104508800000l, start);
        String dateTimeStr = TimeStampUtil.longToDatetime(start);
        assertEquals("2005-01-01 00:00:00.000", dateTimeStr);
    }

    @Test
    public void longToDatetime() {
        String datetime = TimeStampUtil.longToDatetime(1510000000000L);
        assertEquals("2017-11-07 04:26:40.000", datetime);
        long timestamp = TimeStampUtil.datetimeToLong(datetime);
        assertEquals(1510000000000L, timestamp);
    }

    @Test
    public void range() {
        long start = TimeStampUtil.datetimeToLong("2020-10-01 00:00:00.000");
        long timeGap = 1000;
        long numOfRowsPerTable = 1000l * 3600l * 24l * 90l;
        TimeStampUtil.TimeTuple timeTuple = TimeStampUtil.range(start, timeGap, numOfRowsPerTable);
        System.out.println(TimeStampUtil.longToDatetime(timeTuple.start));
        System.out.println(TimeStampUtil.longToDatetime(timeTuple.end));
        System.out.println(timeTuple.timeGap);
    }
}
\ No newline at end of file
tests/pytest/concurrent_inquiry.py
...
@@ -43,7 +43,8 @@ class ConcurrentInquiry:
        self.subtb_stru_list = []
        self.stb_tag_list = []
        self.subtb_tag_list = []
        self.probabilities = [0.05, 0.95]
        self.ifjoin = [0, 1]
    def SetThreadsNum(self, num):
        self.numOfTherads = num
...
@@ -105,7 +106,7 @@ class ConcurrentInquiry:
            conn.close()
    #query condition
    def con_where(self, tlist):
    def con_where(self, tlist, col_list, tag_list):
        l = []
        for i in range(random.randint(0, len(tlist))):
            c = random.choice(where_list)
...
@@ -115,19 +116,26 @@ class ConcurrentInquiry:
            l.append(random.choice(tlist) + c)
        return 'where ' + random.choice([' and ', ' or ']).join(l)
    def con_interval(self, tlist):
        return random.choice(['interval(10s)', 'interval(10d)', 'interval(1n)'])
    def con_interval(self, tlist, col_list, tag_list):
        interval = 'interval(' + str(random.randint(0, 100)) + random.choice(['a', 's', 'd', 'w', 'n', 'y']) + ')'
        return interval
    def con_limit(self, tlist):
        return random.choice(['limit 10', 'limit 10 offset 10', 'slimit 10', 'slimit 10 offset 10', 'limit 10 slimit 10', 'limit 10 offset 5 slimit 5 soffset 10'])
    def con_limit(self, tlist, col_list, tag_list):
        rand1 = str(random.randint(0, 1000))
        rand2 = str(random.randint(0, 1000))
        return random.choice(['limit ' + rand1, 'limit ' + rand1 + ' offset ' + rand2, ' slimit ' + rand1, ' slimit ' + rand1 + ' offset ' + rand2, 'limit ' + rand1 + ' slimit ' + rand2, 'limit ' + rand1 + ' offset' + rand2 + ' slimit ' + rand1 + ' soffset ' + rand2])
    def con_fill(self, tlist):
    def con_fill(self, tlist, col_list, tag_list):
        return random.choice(['fill(null)', 'fill(prev)', 'fill(none)', 'fill(LINEAR)'])
    def con_group(self, tlist):
        return 'group by ' + random.choice(tlist)
    def con_group(self, tlist, col_list, tag_list):
        rand_tag = random.randint(0, 5)
        rand_col = random.randint(0, 1)
        return 'group by ' + ','.join(random.sample(col_list, rand_col)) + ','.join(random.sample(tag_list, rand_tag))
    def con_order(self, tlist):
    def con_order(self, tlist, col_list, tag_list):
        return 'order by ' + random.choice(tlist)
    def gen_query_sql(self):  # generate a query statement
...
@@ -158,22 +166,89 @@ class ConcurrentInquiry:
        sel_col_list = []
        col_rand = random.randint(0, len(col_list))
        for i, j in zip(col_list[0:col_rand], func_list):  # decide which function to apply to each queried column
            alias = ' as ' + str(i)
            pick_func = ''
            if j == 'leastsquares':
                sel_col_list.append(j + '(' + i + ',1,1)')
                pick_func = j + '(' + i + ',1,1)'
            elif j == 'top' or j == 'bottom' or j == 'percentile' or j == 'apercentile':
                sel_col_list.append(j + '(' + i + ',1)')
                pick_func = j + '(' + i + ',1)'
            else:
                sel_col_list.append(j + '(' + i + ')')
                pick_func = j + '(' + i + ')'
            if bool(random.getrandbits(1)):
                pick_func += alias
            sel_col_list.append(pick_func)
        sql = sql + ','.join(sel_col_list) + ' from ' + random.choice(self.stb_list + self.subtb_list) + ' '  #select col & func
        con_func = [self.con_where, self.con_interval, self.con_limit, self.con_group, self.con_order, self.con_fill]
        sel_con = random.sample(con_func, random.randint(0, len(con_func)))
        sel_con_list = []
        for i in sel_con:
            sel_con_list.append(i(tlist))  # get the corresponding condition clause
            sel_con_list.append(i(tlist, col_list, tag_list))  # get the corresponding condition clause
        sql += ' '.join(sel_con_list)  # condition
        print(sql)
        return sql
    def gen_query_join(self):  # generate a join query statement
        tbname = []
        col_list = []
        tag_list = []
        col_intersection = []
        tag_intersection = []
        subtable = None
        if bool(random.getrandbits(1)):
            subtable = True
            tbname = random.sample(self.subtb_list, 2)
            for i in tbname:
                col_list.append(self.subtb_stru_list[self.subtb_list.index(i)])
                tag_list.append(self.subtb_stru_list[self.subtb_list.index(i)])
            col_intersection = list(set(col_list[0]).intersection(set(col_list[1])))
            tag_intersection = list(set(tag_list[0]).intersection(set(tag_list[1])))
        else:
            tbname = random.sample(self.stb_list, 2)
            for i in tbname:
                col_list.append(self.stb_stru_list[self.stb_list.index(i)])
                tag_list.append(self.stb_stru_list[self.stb_list.index(i)])
            col_intersection = list(set(col_list[0]).intersection(set(col_list[1])))
            tag_intersection = list(set(tag_list[0]).intersection(set(tag_list[1])))
        con_rand = random.randint(0, len(condition_list))
        col_rand = random.randint(0, len(col_list))
        tag_rand = random.randint(0, len(tag_list))
        sql = 'select '  #select
        sel_col_tag = []
        col_rand = random.randint(0, len(col_list))
        if bool(random.getrandbits(1)):
            sql += '*'
        else:
            sel_col_tag.append('t1.' + str(random.choice(col_list[0] + tag_list[0])))
            sel_col_tag.append('t2.' + str(random.choice(col_list[1] + tag_list[1])))
            sql += ','.join(sel_col_tag)
        sql = sql + ' from ' + str(tbname[0]) + ' t1,' + str(tbname[1]) + ' t2 '  #select col & func
        join_section = None
        if subtable:
            join_section = ''.join(random.choices(col_intersection))
            sql += 'where t1._c0 = t2._c0 and ' + 't1.' + join_section + '=t2.' + join_section
        else:
            join_section = ''.join(random.choices(col_intersection + tag_intersection))
            sql += 'where t1._c0 = t2._c0 and ' + 't1.' + join_section + '=t2.' + join_section
        print(sql)
        return sql
    def random_pick(self):
        x = random.uniform(0, 1)
        cumulative_probability = 0.0
        for item, item_probability in zip(self.ifjoin, self.probabilities):
            cumulative_probability += item_probability
            if x < cumulative_probability:
                break
        return item
    def rest_query(self, sql):  # REST interface
        host = "127.0.0.1"
        user = "root"
...
@@ -210,6 +285,7 @@ class ConcurrentInquiry:
        nRows = rj['rows'] if ('rows' in rj) else 0
        return nRows
    def query_thread_n(self, threadID):  # query via the native Python connector
        host = "127.0.0.1"
        user = "root"
...
@@ -227,7 +303,10 @@ class ConcurrentInquiry:
        while True:
            try:
                if self.random_pick():
                    sql = self.gen_query_sql()
                else:
                    sql = self.gen_query_join()
                print("sql is ", sql)
                start = time.time()
                cl.execute(sql)
...
@@ -247,7 +326,10 @@ class ConcurrentInquiry:
        print("Thread %d: starting" % threadID)
        while True:
            try:
                if self.random_pick():
                    sql = self.gen_query_sql()
                else:
                    sql = self.gen_query_join()
                print("sql is ", sql)
                start = time.time()
                self.rest_query(sql)
...
@@ -270,7 +352,6 @@ class ConcurrentInquiry:
            threads.append(thread)
            thread.start()
        for i in range(self.r_numOfTherads):
        # for i in range(1):
            thread = threading.Thread(target=self.query_thread_r, args=(i,))
            threads.append(thread)
            thread.start()
...
tests/pytest/fulltest.sh
...
@@ -32,10 +32,8 @@ python3 ./test.py -f table/create_sensitive.py
python3 ./test.py -f table/max_table_length.py
python3 ./test.py -f table/alter_column.py
python3 ./test.py -f table/boundary.py
python3 ./test.py -f table/create-a-lot.py
python3 ./test.py -f table/create.py
python3 ./test.py -f table/del_stable.py
python3 ./test.py -f table/queryWithTaosdKilled.py
# tag
...
@@ -173,6 +171,9 @@ python3 ./test.py -f query/sliding.py
python3 ./test.py -f query/unionAllTest.py
python3 ./test.py -f query/bug2281.py
python3 ./test.py -f query/bug2119.py
python3 ./test.py -f query/isNullTest.py
python3 ./test.py -f query/queryWithTaosdKilled.py
#stream
python3 ./test.py -f stream/metric_1.py
python3 ./test.py -f stream/new.py
...
@@ -213,6 +214,7 @@ python3 ./test.py -f functions/function_stddev.py -r 1
python3 ./test.py -f functions/function_sum.py -r 1
python3 ./test.py -f functions/function_top.py -r 1
#python3 ./test.py -f functions/function_twa.py -r 1
python3 ./test.py -f functions/function_twa_test2.py
python3 queryCount.py
python3 ./test.py -f query/queryGroupbyWithInterval.py
python3 client/twoClients.py
...
tests/pytest/functions/function_twa_test2.py
0 → 100644
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())

        self.rowNum = 10
        self.ts = 1537146000000

    def run(self):
        tdSql.prepare()

        tdSql.execute("create table t1(ts timestamp, c int)")
        for i in range(self.rowNum):
            tdSql.execute("insert into t1 values(%d, %d)" % (self.ts + i * 10000, i + 1))

        # twa verification
        tdSql.query("select twa(c) from t1 where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:01:30.000' ")
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 5.5)

        tdSql.query("select twa(c) from t1 where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:01:30.000' interval(10s)")
        tdSql.checkRows(10)
        tdSql.checkData(0, 1, 1.49995)
        tdSql.checkData(1, 1, 2.49995)
        tdSql.checkData(2, 1, 3.49995)
        tdSql.checkData(3, 1, 4.49995)
        tdSql.checkData(4, 1, 5.49995)
        tdSql.checkData(5, 1, 6.49995)
        tdSql.checkData(6, 1, 7.49995)
        tdSql.checkData(7, 1, 8.49995)
        tdSql.checkData(8, 1, 9.49995)
        tdSql.checkData(9, 1, 10)

        tdSql.query("select twa(c) from t1 where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:01:30.000' interval(10s) sliding(5s)")
        tdSql.checkRows(20)
        tdSql.checkData(0, 1, 1.24995)
        tdSql.checkData(1, 1, 1.49995)
        tdSql.checkData(2, 1, 1.99995)
        tdSql.checkData(3, 1, 2.49995)
        tdSql.checkData(4, 1, 2.99995)
        tdSql.checkData(5, 1, 3.49995)
        tdSql.checkData(6, 1, 3.99995)
        tdSql.checkData(7, 1, 4.49995)
        tdSql.checkData(8, 1, 4.99995)
        tdSql.checkData(9, 1, 5.49995)
        tdSql.checkData(10, 1, 5.99995)
        tdSql.checkData(11, 1, 6.49995)
        tdSql.checkData(12, 1, 6.99995)
        tdSql.checkData(13, 1, 7.49995)
        tdSql.checkData(14, 1, 7.99995)
        tdSql.checkData(15, 1, 8.49995)
        tdSql.checkData(16, 1, 8.99995)
        tdSql.checkData(17, 1, 9.49995)
        tdSql.checkData(18, 1, 9.75000)
        tdSql.checkData(19, 1, 10)

        tdSql.execute("create table t2(ts timestamp, c int)")
        tdSql.execute("insert into t2 values(%d, 1)" % (self.ts + 3000))

        tdSql.query("select twa(c) from t2 where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:01:30.000' ")
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 1)

        tdSql.query("select twa(c) from t2 where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:01:30.000' interval(2s) ")
        tdSql.checkRows(1)
        tdSql.checkData(0, 1, 1)

        tdSql.query("select twa(c) from t2 where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:01:30.000' interval(2s) sliding(1s) ")
        tdSql.checkRows(2)
        tdSql.checkData(0, 1, 1)
        tdSql.checkData(1, 1, 1)

        tdSql.query("select twa(c) from t2 where ts >= '2018-09-17 09:00:04.000' and ts <= '2018-09-17 09:01:30.000' ")
        tdSql.checkRows(0)

        tdSql.query("select twa(c) from t2 where ts >= '2018-09-17 08:00:00.000' and ts <= '2018-09-17 09:00:00.000' ")
        tdSql.checkRows(0)

        tdSql.execute("create table t3(ts timestamp, c int)")
        tdSql.execute("insert into t3 values(%d, 1)" % (self.ts))
        tdSql.execute("insert into t3 values(%d, -2)" % (self.ts + 3000))

        tdSql.query("select twa(c) from t3 where ts >= '2018-09-17 08:59:00.000' and ts <= '2018-09-17 09:01:30.000'")
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, -0.5)

        tdSql.query("select twa(c) from t3 where ts >= '2018-09-17 08:59:00.000' and ts <= '2018-09-17 09:01:30.000' interval(1s)")
        tdSql.checkRows(2)
        tdSql.checkData(0, 1, 0.5005)
        tdSql.checkData(1, 1, -2)

        tdSql.query("select twa(c) from t3 where ts >= '2018-09-17 08:59:00.000' and ts <= '2018-09-17 09:01:30.000' interval(2s) sliding(1s)")
        tdSql.checkRows(4)
        tdSql.checkData(0, 1, 0.5005)
        tdSql.checkData(1, 1, 0.0005)
        tdSql.checkData(2, 1, -1.5)
        tdSql.checkData(3, 1, -2)

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
tests/pytest/pytest_1.sh
...
@@ -19,6 +19,7 @@ python3 ./test.py -f insert/randomNullCommit.py
python3 insert/retentionpolicy.py
python3 ./test.py -f insert/alterTableAndInsert.py
python3 ./test.py -f insert/insertIntoTwoTables.py
python3 ./test.py -f query/isNullTest.py
#table
python3 ./test.py -f table/alter_wal0.py
...
@@ -30,7 +31,6 @@ python3 ./test.py -f table/create_sensitive.py
python3 ./test.py -f table/max_table_length.py
python3 ./test.py -f table/alter_column.py
python3 ./test.py -f table/boundary.py
python3 ./test.py -f table/create-a-lot.py
python3 ./test.py -f table/create.py
python3 ./test.py -f table/del_stable.py
python3 ./test.py -f table/queryWithTaosdKilled.py
...
@@ -206,6 +206,7 @@ python3 ./test.py -f functions/function_stddev.py -r 1
python3 ./test.py -f functions/function_sum.py -r 1
python3 ./test.py -f functions/function_top.py -r 1
#python3 ./test.py -f functions/function_twa.py -r 1
python3 ./test.py -f functions/function_twa_test2.py
python3 queryCount.py
python3 ./test.py -f query/queryGroupbyWithInterval.py
python3 client/twoClients.py
...
tests/pytest/query/isNullTest.py
0 → 100644
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

        self.ts = 1537146000000

    def run(self):
        tdSql.prepare()

        print("==============step1")
        tdSql.execute("create table st(ts timestamp, c1 int, c2 binary(20), c3 nchar(20)) tags(t1 int, t2 binary(20), t3 nchar(20))")
        tdSql.execute("create table t1 using st tags(1, 'binary1', 'nchar1')")
        tdSql.execute("insert into t2(ts, c2) using st(t2) tags('') values(%d, '')" % (self.ts + 10))
        tdSql.execute("insert into t3(ts, c2) using st(t3) tags('') values(%d, '')" % (self.ts + 10))

        for i in range(10):
            tdSql.execute("insert into t1 values(%d, %d, 'binary%d', 'nchar%d')" % (self.ts + i, i, i, i))
            tdSql.execute("insert into t2 values(%d, %d, 'binary%d', 'nchar%d')" % (self.ts + i, i, i, i))
            tdSql.execute("insert into t3 values(%d, %d, 'binary%d', 'nchar%d')" % (self.ts + i, i, i, i))

        tdSql.execute("insert into t1(ts, c2) values(%d, '')" % (self.ts + 10))
        tdSql.execute("insert into t1(ts, c3) values(%d, '')" % (self.ts + 11))
        tdSql.execute("insert into t2(ts, c3) values(%d, '')" % (self.ts + 11))
        tdSql.execute("insert into t3(ts, c3) values(%d, '')" % (self.ts + 11))

        tdSql.query("select count(*) from st")
        tdSql.checkData(0, 0, 36)

        tdSql.query("select count(*) from st where t1 is null")
        tdSql.checkData(0, 0, 24)

        tdSql.query("select count(*) from st where t1 is not null")
        tdSql.checkData(0, 0, 12)

        tdSql.query("select count(*) from st where t2 is null")
        tdSql.checkData(0, 0, 12)

        tdSql.query("select count(*) from st where t2 is not null")
        tdSql.checkData(0, 0, 24)

        tdSql.error("select count(*) from st where t2 <> null")
        tdSql.error("select count(*) from st where t2 = null")

        tdSql.query("select count(*) from st where t2 = '' ")
        tdSql.checkData(0, 0, 12)

        tdSql.query("select count(*) from st where t2 <> '' ")
        tdSql.checkData(0, 0, 24)

        tdSql.query("select count(*) from st where t3 is null")
        tdSql.checkData(0, 0, 12)

        tdSql.query("select count(*) from st where t3 is not null")
        tdSql.checkData(0, 0, 24)

        tdSql.error("select count(*) from st where t3 <> null")
        tdSql.error("select count(*) from st where t3 = null")

        tdSql.query("select count(*) from st where t3 = '' ")
        tdSql.checkData(0, 0, 12)

        tdSql.query("select count(*) from st where t3 <> '' ")
        tdSql.checkData(0, 0, 24)

        tdSql.query("select count(*) from st where c1 is not null")
        tdSql.checkData(0, 0, 30)

        tdSql.query("select count(*) from st where c1 is null")
        tdSql.checkData(0, 0, 6)

        tdSql.query("select count(*) from st where c2 is not null")
        tdSql.checkData(0, 0, 33)

        tdSql.query("select count(*) from st where c2 is null")
        tdSql.checkData(0, 0, 3)

        tdSql.error("select count(*) from st where c2 <> null")
        tdSql.error("select count(*) from st where c2 = null")

        tdSql.query("select count(*) from st where c2 = '' ")
        tdSql.checkData(0, 0, 3)

        tdSql.query("select count(*) from st where c2 <> '' ")
        tdSql.checkData(0, 0, 30)

        tdSql.query("select count(*) from st where c3 is not null")
        tdSql.checkData(0, 0, 33)

        tdSql.query("select count(*) from st where c3 is null")
        tdSql.checkData(0, 0, 3)

        tdSql.error("select count(*) from st where c3 <> null")
        tdSql.error("select count(*) from st where c3 = null")

        tdSql.query("select count(*) from st where c3 = '' ")
        tdSql.checkData(0, 0, 3)

        tdSql.query("select count(*) from st where c3 <> '' ")
        tdSql.checkData(0, 0, 30)

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
tests/pytest/query/queryNullValueTest.py
...
@@ -50,7 +50,7 @@ class TDTestCase:
            tdSql.execute("insert into t0 values (%d, NULL)" % (self.ts))
            tdDnodes.stop(1)
            tdLog.sleep(10)
            # tdLog.sleep(10)
            tdDnodes.start(1)
            tdSql.execute("use db")
            tdSql.query("select * from t0")
...
@@ -62,7 +62,7 @@ class TDTestCase:
            tdSql.execute("create table t1 (ts timestamp, col %s)" % self.types[i])
            tdSql.execute("insert into t1 values (%d, NULL)" % (self.ts))
            tdDnodes.stop(1)
            tdLog.sleep(10)
            # tdLog.sleep(10)
            tdDnodes.start(1)
            tdSql.execute("use db")
...
tests/pytest/query/queryWithTaosdKilled.py
0 → 100644
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import os
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
import numpy as np


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        self.conn = conn
        tdSql.init(conn.cursor())

        self.rowNum = 10
        self.ts = 1537146000000

    def createOldDir(self):
        path = tdDnodes.dnodes[1].getDnodeRootDir(1)
        print(path)
        tdLog.info("sudo mkdir -p %s/data/vnode/vnode2/wal/old" % path)
        os.system("sudo mkdir -p %s/data/vnode/vnode2/wal/old" % path)

    def run(self):
        # os.system("rm -rf %s/ " % tdDnodes.getDnodesRootDir())
        tdSql.prepare()

        tdSql.execute("create table st(ts timestamp, speed int)")
        tdSql.execute("insert into st values(now, 1)")
        tdSql.query("select count(*) from st")
        tdSql.checkRows(1)

        self.createOldDir()
        tdLog.sleep(10)

        print("force kill taosd")
        os.system("sudo kill -9 $(pgrep -x taosd)")
        os.system("")
        tdDnodes.start(1)

        tdSql.init(self.conn.cursor())
        tdSql.execute("use db")
        tdSql.query("select count(*) from st")
        tdSql.checkRows(1)

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
tests/pytest/stream/stream2.py
...
@@ -88,6 +88,10 @@ class TDTestCase:
        except Exception as e:
            tdLog.info(repr(e))

        tdSql.query("show streams")
        tdSql.checkRows(1)
        tdSql.checkData(0, 2, 's0')

        tdLog.info("===== step8 =====")
        tdSql.query("select count(*), count(col1), count(col2) from stb0 interval(1d)")
...
@@ -142,6 +146,12 @@ class TDTestCase:
        except Exception as e:
            tdLog.info(repr(e))

        tdSql.query("show streams")
        tdSql.checkRows(2)
        tdSql.checkData(0, 2, 's1')
        tdSql.checkData(1, 2, 's0')

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)
...
tests/pytest/update/append_commit_data.py
...
@@ -38,38 +38,62 @@ class TDTestCase:
        insertRows = 200
        t0 = 1604298064000
        sql = 'insert into db.t1 values '
        temp = ''
        tdLog.info("insert %d rows" % (insertRows))
        for i in range(0, insertRows):
            # ret = tdSql.execute(
            #     'insert into t1 values (%d , 1)' %
            #     (t0+i))
            temp += '(%d,1)' % (t0 + i)
            if i % 100 == 0 or i == (insertRows - 1):
                print(sql + temp)
                ret = tdSql.execute(
                    'insert into t1 values (%d , 1)' % (t0+i))
                    sql + temp)
                temp = ''
        print("==========step2")
        print("restart to commit ")
        tdDnodes.stop(1)
        tdDnodes.start(1)
        tdSql.query("select * from db.t1")
        tdSql.checkRows(insertRows)
        for k in range(0, 100):
            tdLog.info("insert %d rows" % (insertRows))
            temp = ''
            for i in range(0, insertRows):
                temp += '(%d,1)' % (t0 + k * 200 + i)
                if i % 100 == 0 or i == (insertRows - 1):
                    print(sql + temp)
                    ret = tdSql.execute(
                        'insert into db.t1 values(%d,1)' % (t0 + k * 200 + i)
                        sql + temp)
                    temp = ''
            tdDnodes.stop(1)
            tdDnodes.start(1)
            tdSql.query("select * from db.t1")
            tdSql.checkRows(insertRows + 200 * k)
        print("==========step2")
        print("==========step3")
        print("insert into another table ")
        s = 'use db'
        tdSql.execute(s)
        ret = tdSql.execute('create table t2 (ts timestamp, a int)')
        insertRows = 20000
        sql = 'insert into t2 values '
        temp = ''
        for i in range(0, insertRows):
            # ret = tdSql.execute(
            #     'insert into t2 values (%d, 1)' %
            #     (t0+i))
            temp += '(%d,1)' % (t0 + i)
            if i % 500 == 0 or i == (insertRows - 1):
                print(sql + temp)
                ret = tdSql.execute(
                    'insert into t2 values (%d, 1)' % (t0+i))
                    sql + temp)
                temp = ''
        tdDnodes.stop(1)
        tdDnodes.start(1)
        tdSql.query("select * from t2")
...
tests/pytest/util/dnodes.py
...
@@ -255,9 +255,19 @@ class TDDnode:
            tdLog.exit(cmd)
        self.running = 1
        tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
        time.sleep(0.1)
        key = 'from offline to online'
        bkey = bytes(key, encoding="utf8")
        logFile = self.logDir + "/taosdlog.0"
        popen = subprocess.Popen('tail -f ' + logFile, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        while True:
            line = popen.stdout.readline().strip()
            if bkey in line:
                popen.kill()
                break
        tdLog.debug("the dnode:%d has been started." % (self.index))
        tdLog.debug("wait 5 seconds for the dnode:%d to start." % (self.index))
        time.sleep(5)
        # time.sleep(5)

    def startWithoutSleep(self):
        buildPath = self.getBuildPath()
...
tests/script/general/parser/function.sim
...
...
@@ -22,7 +22,7 @@ $db = $dbPrefix . $i
$mt = $mtPrefix . $i
sql drop database if exists $db
sql create database $db
sql create database $db keep 36500
sql use $db
print =====================================> test case for twa in single block
...
...
@@ -111,7 +111,7 @@ if $rows != 2 then
return -1
endi
if $data00 != @15-08-18 00:06:00.00@ then
if $data00 != @15-08-18 00:06:00.000@ then
return -1
endi
...
...
@@ -219,10 +219,28 @@ if $data02 != 6 then
return -1
endi
sql select twa(k) from t1 where ts>'2015-8-18 00:00:00' and ts<'2015-8-18 00:00:1'
if $rows != 0 then
return -1
endi
sql select twa(k),avg(k),count(1) from t1 where ts>='2015-8-18 00:00:00' and ts<='2015-8-18 00:30:00' interval(10m) order by ts asc
sql select twa(k),avg(k),count(1) from t1 where ts>='2015-8-18 00:00:00' and ts<='2015-8-18 00:30:00' interval(10m) order by ts desc
#todo add test case while column filte exists.
#todo add test case while column filter exists.
#sql select count(*),TWA(k) from tm0 where ts>='1970-1-1 13:43:00' and ts<='1970-1-1 13:44:10' interval(9s)
sql create table tm0 (ts timestamp, k float);
sql insert into tm0 values(100000000, 5);
sql insert into tm0 values(100003000, -9);
sql select twa(k) from tm0 where ts<now
if $rows != 1 then
return -1
endi
select count(*),TWA(k) from tm0 where ts>='1970-1-1 13:43:00' and ts<='1970-1-1 13:44:10' interval(9s)
if $data00 != -2.000000000 then
print expect -2.000000000, actual: $data00
return -1
endi
tests/script/general/parser/tags_filter.sim
...
...
@@ -149,4 +149,57 @@ if $rows != 2 then
return -1
endi
print ==================>td-2424
sql create table t1(ts timestamp, k float)
sql insert into t1 values(now, 8.001)
sql select * from t1 where k=8.001
if $rows != 1 then
return -1
endi
sql select * from t1 where k<8.001
if $rows != 0 then
return -1
endi
sql select * from t1 where k<=8.001
if $rows != 1 then
return -1
endi
sql select * from t1 where k>8.001
if $rows != 0 then
return -1
endi
sql select * from t1 where k>=8.001
if $rows != 1 then
return -1
endi
sql select * from t1 where k<>8.001
if $rows != 0 then
return -1
endi
sql select * from t1 where k>=8.001 and k<=8.001
if $rows != 1 then
return -1
endi
sql select * from t1 where k>=8.0009999 and k<=8.001
if $rows != 1 then
return -1
endi
sql select * from t1 where k>8.001 and k<=8.001
if $rows != 0 then
return -1
endi
sql select * from t1 where k>=8.001 and k<8.001
if $rows != 0 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
tests/script/general/parser/testSuite.sim
...
...
@@ -103,6 +103,8 @@ sleep 500
run general/parser/timestamp.sim
sleep 500
run general/parser/sliding.sim
sleep 500
run general/parser/function.sim
#sleep 500
#run general/parser/repeatStream.sim
...
...
tests/script/unique/arbitrator/insert_duplicationTs.sim
...
...
@@ -91,8 +91,11 @@ while $i < $tblNum
$i = $i + 1
endw
sql show db.vgroups;
print d1: $data04 $data05 , d2: $data06 $data07
sql select count(*) from $stb
print rows:$rows data00:$data00
print rows:$rows data00:$data00
print test1==> rows:$rows data00:$data00
if $rows != 1 then
return -1
endi
...
...
@@ -103,6 +106,15 @@ endi
$totalRows = $data00
sql select count(*) from $stb
print test2==> rows:$rows data00:$data00
sql select count(*) from $stb
print test3==> rows:$rows data00:$data00
sql select count(*) from $stb
print test4==> rows:$rows data00:$data00
sql select count(*) from $stb
print test5==> rows:$rows data00:$data00
print ============== step3: insert old data(now-15d) and new data(now+15d), control data rows in order to save in cache, not falling disc
sql insert into $tb values ( now - 20d , -20 )
sql insert into $tb values ( now - 40d , -40 )
...
...
@@ -153,12 +165,21 @@ if $data00 != $totalRows then
return -1
endi
sql select count(*) from $stb
print data00 $data00
sql select count(*) from $stb
print data00 $data00
sql select count(*) from $stb
print data00 $data00
sql select count(*) from $stb
print data00 $data00
print ============== step5: insert two data rows: now-16d, now+16d,
sql insert into $tb values ( now - 21d , -21 )
sql insert into $tb values ( now - 41d , -41 )
$totalRows = $totalRows + 2
print ============== step5: restart dnode2, waiting sync end
print ============== step6: restart dnode2, waiting sync end
system sh/exec.sh -n dnode2 -s start
sleep 3000
$loopCnt = 0
...
...
@@ -192,9 +213,81 @@ endi
sleep $sleepTimer
# check using select
sleep 5000
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi