Commit 78358ef4 authored by Alex Duan

[TS-881] merge develop to solve conflict

...@@ -38,7 +38,8 @@ def pre_test(){ ...@@ -38,7 +38,8 @@ def pre_test(){
sudo rmtaos || echo "taosd has not installed" sudo rmtaos || echo "taosd has not installed"
''' '''
sh ''' sh '''
killall -9 taosd ||echo "no taosd running" kill -9 $(pidof taosd) ||echo "no taosd running"
kill -9 $(pidof taosadapter) ||echo "no taosadapter running"
killall -9 gdb || echo "no gdb running" killall -9 gdb || echo "no gdb running"
killall -9 python3.8 || echo "no python program running" killall -9 python3.8 || echo "no python program running"
cd ${WKC} cd ${WKC}
...@@ -56,7 +57,7 @@ def pre_test(){ ...@@ -56,7 +57,7 @@ def pre_test(){
cd ${WKC} cd ${WKC}
git checkout 2.0 git checkout 2.0
''' '''
} }
else{ else{
sh ''' sh '''
cd ${WKC} cd ${WKC}
...@@ -66,6 +67,7 @@ def pre_test(){ ...@@ -66,6 +67,7 @@ def pre_test(){
} }
sh''' sh'''
cd ${WKC} cd ${WKC}
git remote prune origin
[ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md" [ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md"
git pull >/dev/null git pull >/dev/null
git fetch origin +refs/pull/${CHANGE_ID}/merge git fetch origin +refs/pull/${CHANGE_ID}/merge
...@@ -87,28 +89,28 @@ def pre_test(){ ...@@ -87,28 +89,28 @@ def pre_test(){
cd ${WK} cd ${WK}
git checkout 2.0 git checkout 2.0
''' '''
} }
else{ else{
sh ''' sh '''
cd ${WK} cd ${WK}
git checkout develop git checkout develop
''' '''
} }
} }
sh ''' sh '''
cd ${WK} cd ${WK}
git pull >/dev/null git pull >/dev/null
export TZ=Asia/Harbin export TZ=Asia/Harbin
date date
git clean -dfx git clean -dfx
mkdir debug mkdir debug
cd debug cd debug
cmake .. > /dev/null cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true > /dev/null
make > /dev/null make > /dev/null
make install > /dev/null make install > /dev/null
cd ${WKC}/tests cd ${WKC}/tests
pip3 install ${WKC}/src/connector/python/ pip3 install ${WKC}/src/connector/python/
''' '''
return 1 return 1
} }
...@@ -130,7 +132,7 @@ def pre_test_noinstall(){ ...@@ -130,7 +132,7 @@ def pre_test_noinstall(){
cd ${WKC} cd ${WKC}
git checkout 2.0 git checkout 2.0
''' '''
} }
else{ else{
sh ''' sh '''
cd ${WKC} cd ${WKC}
...@@ -140,6 +142,7 @@ def pre_test_noinstall(){ ...@@ -140,6 +142,7 @@ def pre_test_noinstall(){
} }
sh''' sh'''
cd ${WKC} cd ${WKC}
git remote prune origin
[ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md" [ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md"
git pull >/dev/null git pull >/dev/null
git fetch origin +refs/pull/${CHANGE_ID}/merge git fetch origin +refs/pull/${CHANGE_ID}/merge
...@@ -161,24 +164,24 @@ def pre_test_noinstall(){ ...@@ -161,24 +164,24 @@ def pre_test_noinstall(){
cd ${WK} cd ${WK}
git checkout 2.0 git checkout 2.0
''' '''
} }
else{ else{
sh ''' sh '''
cd ${WK} cd ${WK}
git checkout develop git checkout develop
''' '''
} }
} }
sh ''' sh '''
cd ${WK} cd ${WK}
git pull >/dev/null git pull >/dev/null
export TZ=Asia/Harbin export TZ=Asia/Harbin
date date
git clean -dfx git clean -dfx
mkdir debug mkdir debug
cd debug cd debug
cmake .. > /dev/null cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=false > /dev/null
make make
''' '''
return 1 return 1
...@@ -201,7 +204,7 @@ def pre_test_mac(){ ...@@ -201,7 +204,7 @@ def pre_test_mac(){
cd ${WKC} cd ${WKC}
git checkout 2.0 git checkout 2.0
''' '''
} }
else{ else{
sh ''' sh '''
cd ${WKC} cd ${WKC}
...@@ -211,6 +214,7 @@ def pre_test_mac(){ ...@@ -211,6 +214,7 @@ def pre_test_mac(){
} }
sh''' sh'''
cd ${WKC} cd ${WKC}
git remote prune origin
[ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md" [ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md"
git pull >/dev/null git pull >/dev/null
git fetch origin +refs/pull/${CHANGE_ID}/merge git fetch origin +refs/pull/${CHANGE_ID}/merge
...@@ -232,24 +236,24 @@ def pre_test_mac(){ ...@@ -232,24 +236,24 @@ def pre_test_mac(){
cd ${WK} cd ${WK}
git checkout 2.0 git checkout 2.0
''' '''
} }
else{ else{
sh ''' sh '''
cd ${WK} cd ${WK}
git checkout develop git checkout develop
''' '''
} }
} }
sh ''' sh '''
cd ${WK} cd ${WK}
git pull >/dev/null git pull >/dev/null
export TZ=Asia/Harbin export TZ=Asia/Harbin
date date
git clean -dfx git clean -dfx
mkdir debug mkdir debug
cd debug cd debug
cmake .. > /dev/null cmake .. -DBUILD_TOOLS=false > /dev/null
go env -w GOPROXY=https://goproxy.cn,direct go env -w GOPROXY=https://goproxy.cn,direct
go env -w GO111MODULE=on go env -w GO111MODULE=on
cmake --build . cmake --build .
...@@ -264,7 +268,7 @@ def pre_test_win(){ ...@@ -264,7 +268,7 @@ def pre_test_win(){
cd C:\\workspace\\TDinternal cd C:\\workspace\\TDinternal
rd /s /Q C:\\workspace\\TDinternal\\debug rd /s /Q C:\\workspace\\TDinternal\\debug
cd C:\\workspace\\TDinternal\\community cd C:\\workspace\\TDinternal\\community
git reset --hard HEAD~10 git reset --hard HEAD~10
''' '''
script { script {
if (env.CHANGE_TARGET == 'master') { if (env.CHANGE_TARGET == 'master') {
...@@ -278,7 +282,7 @@ def pre_test_win(){ ...@@ -278,7 +282,7 @@ def pre_test_win(){
cd C:\\workspace\\TDinternal\\community cd C:\\workspace\\TDinternal\\community
git checkout 2.0 git checkout 2.0
''' '''
} }
else{ else{
bat ''' bat '''
cd C:\\workspace\\TDinternal\\community cd C:\\workspace\\TDinternal\\community
...@@ -288,7 +292,8 @@ def pre_test_win(){ ...@@ -288,7 +292,8 @@ def pre_test_win(){
} }
bat''' bat'''
cd C:\\workspace\\TDinternal\\community cd C:\\workspace\\TDinternal\\community
git pull git remote prune origin
git pull
git fetch origin +refs/pull/%CHANGE_ID%/merge git fetch origin +refs/pull/%CHANGE_ID%/merge
git checkout -qf FETCH_HEAD git checkout -qf FETCH_HEAD
git clean -dfx git clean -dfx
...@@ -308,36 +313,36 @@ def pre_test_win(){ ...@@ -308,36 +313,36 @@ def pre_test_win(){
cd C:\\workspace\\TDinternal cd C:\\workspace\\TDinternal
git checkout 2.0 git checkout 2.0
''' '''
} }
else{ else{
bat ''' bat '''
cd C:\\workspace\\TDinternal cd C:\\workspace\\TDinternal
git checkout develop git checkout develop
''' '''
} }
} }
bat ''' bat '''
cd C:\\workspace\\TDinternal cd C:\\workspace\\TDinternal
git pull git pull
date date
git clean -dfx git clean -dfx
mkdir debug mkdir debug
cd debug cd debug
call "C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\VC\\Auxiliary\\Build\\vcvarsall.bat" amd64 call "C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\VC\\Auxiliary\\Build\\vcvarsall.bat" amd64
cmake ../ -G "NMake Makefiles" cmake ../ -G "NMake Makefiles"
set CL=/MP nmake nmake || exit 8 set CL=/MP nmake nmake || exit 8
nmake install || exit 8 nmake install || exit 8
xcopy /e/y/i/f C:\\workspace\\TDinternal\\debug\\build\\lib\\taos.dll C:\\Windows\\System32 || exit 8 xcopy /e/y/i/f C:\\workspace\\TDinternal\\debug\\build\\lib\\taos.dll C:\\Windows\\System32 || exit 8
cd C:\\workspace\\TDinternal\\community\\src\\connector\\python cd C:\\workspace\\TDinternal\\community\\src\\connector\\python
python -m pip install . python -m pip install .
''' '''
return 1 return 1
} }
pipeline { pipeline {
agent none agent none
options { skipDefaultCheckout() } options { skipDefaultCheckout() }
environment{ environment{
WK = '/var/lib/jenkins/workspace/TDinternal' WK = '/var/lib/jenkins/workspace/TDinternal'
WKC= '/var/lib/jenkins/workspace/TDinternal/community' WKC= '/var/lib/jenkins/workspace/TDinternal/community'
...@@ -345,7 +350,7 @@ pipeline { ...@@ -345,7 +350,7 @@ pipeline {
stages { stages {
stage('pre_build'){ stage('pre_build'){
agent{label 'master'} agent{label 'master'}
options { skipDefaultCheckout() } options { skipDefaultCheckout() }
when { when {
changeRequest() changeRequest()
} }
...@@ -370,32 +375,32 @@ pipeline { ...@@ -370,32 +375,32 @@ pipeline {
// sh ''' // sh '''
// git checkout 2.0 // git checkout 2.0
// ''' // '''
// } // }
// else{ // else{
// sh ''' // sh '''
// git checkout develop // git checkout develop
// ''' // '''
// } // }
// } // }
// sh''' // sh'''
// git fetch origin +refs/pull/${CHANGE_ID}/merge // git fetch origin +refs/pull/${CHANGE_ID}/merge
// git checkout -qf FETCH_HEAD // git checkout -qf FETCH_HEAD
// ''' // '''
// script{ // script{
// skipbuild='2' // skipbuild='2'
// skipbuild=sh(script: "git log -2 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip]' && echo 1 || echo 2", returnStdout:true) // skipbuild=sh(script: "git log -2 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip]' && echo 1 || echo 2", returnStdout:true)
// println skipbuild // println skipbuild
// } // }
// sh''' // sh'''
// rm -rf ${WORKSPACE}.tes // rm -rf ${WORKSPACE}.tes
// ''' // '''
// } // }
} }
} }
stage('Parallel test stage') { stage('Parallel test stage') {
//only build pr //only build pr
options { skipDefaultCheckout() } options { skipDefaultCheckout() }
when { when {
allOf{ allOf{
changeRequest() changeRequest()
...@@ -414,13 +419,11 @@ pipeline { ...@@ -414,13 +419,11 @@ pipeline {
./test-all.sh p1 ./test-all.sh p1
date''' date'''
} }
} }
} }
stage('python_2_s5') { stage('python_2_s5') {
agent{label " slave5 || slave15 "} agent{label " slave5 || slave15 "}
steps { steps {
pre_test() pre_test()
timeout(time: 55, unit: 'MINUTES'){ timeout(time: 55, unit: 'MINUTES'){
sh ''' sh '''
...@@ -433,8 +436,8 @@ pipeline { ...@@ -433,8 +436,8 @@ pipeline {
} }
stage('python_3_s6') { stage('python_3_s6') {
agent{label " slave6 || slave16 "} agent{label " slave6 || slave16 "}
steps { steps {
timeout(time: 55, unit: 'MINUTES'){ timeout(time: 55, unit: 'MINUTES'){
pre_test() pre_test()
sh ''' sh '''
date date
...@@ -446,8 +449,8 @@ pipeline { ...@@ -446,8 +449,8 @@ pipeline {
} }
stage('test_b1_s2') { stage('test_b1_s2') {
agent{label " slave2 || slave12 "} agent{label " slave2 || slave12 "}
steps { steps {
timeout(time: 55, unit: 'MINUTES'){ timeout(time: 55, unit: 'MINUTES'){
pre_test() pre_test()
sh ''' sh '''
rm -rf /var/lib/taos/* rm -rf /var/lib/taos/*
...@@ -455,23 +458,54 @@ pipeline { ...@@ -455,23 +458,54 @@ pipeline {
nohup taosd >/dev/null & nohup taosd >/dev/null &
sleep 10 sleep 10
''' '''
sh ''' sh '''
cd ${WKC}/tests/examples/nodejs cd ${WKC}/src/connector/python
npm install td2.0-connector > /dev/null 2>&1 export PYTHONPATH=$PWD/
node nodejsChecker.js host=localhost export LD_LIBRARY_PATH=${WKC}/debug/build/lib
node test1970.js pip3 install pytest
cd ${WKC}/tests/connectorTest/nodejsTest/nanosupport pytest tests/
npm install td2.0-connector > /dev/null 2>&1
node nanosecondTest.js python3 examples/bind-multi.py
python3 examples/bind-row.py
python3 examples/demo.py
python3 examples/insert-lines.py
python3 examples/pep-249.py
python3 examples/query-async.py
python3 examples/query-objectively.py
python3 examples/subscribe-sync.py
python3 examples/subscribe-async.py
'''
sh '''
cd ${WKC}/src/connector/nodejs
npm install
npm run test
cd ${WKC}/tests/examples/nodejs
npm install td2.0-connector > /dev/null 2>&1
node nodejsChecker.js host=localhost
node test1970.js
cd ${WKC}/tests/connectorTest/nodejsTest/nanosupport
npm install td2.0-connector > /dev/null 2>&1
node nanosecondTest.js
''' '''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh ''' sh '''
cd ${WKC}/src/connector/C#
dotnet test
dotnet run --project src/test/Cases/Cases.csproj
cd ${WKC}/tests/examples/C#
dotnet run --project C#checker/C#checker.csproj
dotnet run --project TDengineTest/TDengineTest.csproj
dotnet run --project schemaless/schemaless.csproj
cd ${WKC}/tests/examples/C#/taosdemo cd ${WKC}/tests/examples/C#/taosdemo
mcs -out:taosdemo *.cs > /dev/null 2>&1 dotnet build -c Release
echo '' |./taosdemo -c /etc/taos tree | true
./bin/Release/net5.0/taosdemo -c /etc/taos -y
''' '''
} }
sh ''' sh '''
cd ${WKC}/tests/gotest cd ${WKC}/tests/gotest
bash batchtest.sh bash batchtest.sh
...@@ -485,7 +519,7 @@ pipeline { ...@@ -485,7 +519,7 @@ pipeline {
} }
stage('test_crash_gen_s3') { stage('test_crash_gen_s3') {
agent{label " slave3 || slave13 "} agent{label " slave3 || slave13 "}
steps { steps {
pre_test() pre_test()
timeout(time: 60, unit: 'MINUTES'){ timeout(time: 60, unit: 'MINUTES'){
...@@ -515,7 +549,7 @@ pipeline { ...@@ -515,7 +549,7 @@ pipeline {
./test-all.sh b2fq ./test-all.sh b2fq
date date
''' '''
} }
} }
} }
stage('test_valgrind_s4') { stage('test_valgrind_s4') {
...@@ -529,8 +563,8 @@ pipeline { ...@@ -529,8 +563,8 @@ pipeline {
./valgrind-test.sh 2>&1 > mem-error-out.log ./valgrind-test.sh 2>&1 > mem-error-out.log
./handle_val_log.sh ./handle_val_log.sh
''' '''
} }
timeout(time: 55, unit: 'MINUTES'){ timeout(time: 55, unit: 'MINUTES'){
sh ''' sh '''
date date
cd ${WKC}/tests cd ${WKC}/tests
...@@ -546,8 +580,8 @@ pipeline { ...@@ -546,8 +580,8 @@ pipeline {
} }
stage('test_b4_s7') { stage('test_b4_s7') {
agent{label " slave7 || slave17 "} agent{label " slave7 || slave17 "}
steps { steps {
timeout(time: 105, unit: 'MINUTES'){ timeout(time: 105, unit: 'MINUTES'){
pre_test() pre_test()
sh ''' sh '''
date date
...@@ -560,14 +594,13 @@ pipeline { ...@@ -560,14 +594,13 @@ pipeline {
// ./test-all.sh full jdbc // ./test-all.sh full jdbc
// cd ${WKC}/tests // cd ${WKC}/tests
// ./test-all.sh full unit // ./test-all.sh full unit
} }
} }
} }
stage('test_b5_s8') { stage('test_b5_s8') {
agent{label " slave8 || slave18 "} agent{label " slave8 || slave18 "}
steps { steps {
timeout(time: 55, unit: 'MINUTES'){ timeout(time: 55, unit: 'MINUTES'){
pre_test() pre_test()
sh ''' sh '''
date date
...@@ -579,10 +612,14 @@ pipeline { ...@@ -579,10 +612,14 @@ pipeline {
} }
stage('test_b6_s9') { stage('test_b6_s9') {
agent{label " slave9 || slave19 "} agent{label " slave9 || slave19 "}
steps { steps {
timeout(time: 55, unit: 'MINUTES'){ timeout(time: 55, unit: 'MINUTES'){
pre_test() pre_test()
sh ''' sh '''
cd ${WKC}/tests
./test-all.sh develop-test
'''
sh '''
date date
cd ${WKC}/tests cd ${WKC}/tests
./test-all.sh b6fq ./test-all.sh b6fq
...@@ -592,75 +629,79 @@ pipeline { ...@@ -592,75 +629,79 @@ pipeline {
} }
stage('test_b7_s10') { stage('test_b7_s10') {
agent{label " slave10 || slave20 "} agent{label " slave10 || slave20 "}
steps { steps {
timeout(time: 55, unit: 'MINUTES'){ timeout(time: 55, unit: 'MINUTES'){
pre_test() pre_test()
sh ''' sh '''
cd ${WKC}/tests
./test-all.sh system-test
'''
sh '''
date date
cd ${WKC}/tests cd ${WKC}/tests
./test-all.sh b7fq ./test-all.sh b7fq
date''' date'''
} }
} }
} }
stage('arm64centos7') { stage('arm64centos7') {
agent{label " arm64centos7 "} agent{label " arm64centos7 "}
steps { steps {
pre_test_noinstall() pre_test_noinstall()
} }
} }
stage('arm64centos8') { stage('arm64centos8') {
agent{label " arm64centos8 "} agent{label " arm64centos8 "}
steps { steps {
pre_test_noinstall() pre_test_noinstall()
} }
} }
stage('arm32bionic') { stage('arm32bionic') {
agent{label " arm32bionic "} agent{label " arm32bionic "}
steps { steps {
pre_test_noinstall() pre_test_noinstall()
} }
} }
stage('arm64bionic') { stage('arm64bionic') {
agent{label " arm64bionic "} agent{label " arm64bionic "}
steps { steps {
pre_test_noinstall() pre_test_noinstall()
} }
} }
stage('arm64focal') { stage('arm64focal') {
agent{label " arm64focal "} agent{label " arm64focal "}
steps { steps {
pre_test_noinstall() pre_test_noinstall()
} }
} }
stage('centos7') { stage('centos7') {
agent{label " centos7 "} agent{label " centos7 "}
steps { steps {
pre_test_noinstall() pre_test_noinstall()
} }
} }
stage('ubuntu:trusty') { stage('ubuntu:trusty') {
agent{label " trusty "} agent{label " trusty "}
steps { steps {
pre_test_noinstall() pre_test_noinstall()
} }
} }
stage('ubuntu:xenial') { stage('ubuntu:xenial') {
agent{label " xenial "} agent{label " xenial "}
steps { steps {
pre_test_noinstall() pre_test_noinstall()
} }
} }
stage('ubuntu:bionic') { stage('ubuntu:bionic') {
agent{label " bionic "} agent{label " bionic "}
steps { steps {
pre_test_noinstall() pre_test_noinstall()
} }
} }
stage('Mac_build') { stage('Mac_build') {
agent{label " catalina "} agent{label " catalina "}
steps { steps {
pre_test_mac() pre_test_mac()
} }
} }
...@@ -668,7 +709,7 @@ pipeline { ...@@ -668,7 +709,7 @@ pipeline {
agent{label " wintest "} agent{label " wintest "}
steps { steps {
pre_test() pre_test()
script{ script{
while(win_stop == 0){ while(win_stop == 0){
sleep(1) sleep(1)
} }
...@@ -678,7 +719,6 @@ pipeline { ...@@ -678,7 +719,6 @@ pipeline {
stage('test'){ stage('test'){
agent{label "win"} agent{label "win"}
steps{ steps{
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
pre_test_win() pre_test_win()
timeout(time: 20, unit: 'MINUTES'){ timeout(time: 20, unit: 'MINUTES'){
...@@ -687,18 +727,16 @@ pipeline { ...@@ -687,18 +727,16 @@ pipeline {
.\\test-all.bat wintest .\\test-all.bat wintest
''' '''
} }
} }
script{ script{
win_stop=1 win_stop=1
} }
} }
} }
} }
} }
} }
post { post {
success { success {
emailext ( emailext (
subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' SUCCESS", subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' SUCCESS",
...@@ -725,7 +763,6 @@ pipeline { ...@@ -725,7 +763,6 @@ pipeline {
<li>提交信息:${env.CHANGE_TITLE}</li> <li>提交信息:${env.CHANGE_TITLE}</li>
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li> <li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li> <li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
</div> </div>
</ul> </ul>
</td> </td>
...@@ -763,7 +800,6 @@ pipeline { ...@@ -763,7 +800,6 @@ pipeline {
<li>提交信息:${env.CHANGE_TITLE}</li> <li>提交信息:${env.CHANGE_TITLE}</li>
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li> <li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li> <li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
</div> </div>
</ul> </ul>
</td> </td>
...@@ -775,5 +811,5 @@ pipeline { ...@@ -775,5 +811,5 @@ pipeline {
from: "support@taosdata.com" from: "support@taosdata.com"
) )
} }
} }
} }
...@@ -59,6 +59,9 @@ sudo apt-get install -y maven ...@@ -59,6 +59,9 @@ sudo apt-get install -y maven
``` ```
#### Install build dependencies for taos-tools #### Install build dependencies for taos-tools
We provide a few useful tools such as taosBenchmark (formerly named taosdemo) and taosdump. They used to be part of TDengine. Starting with TDengine 2.4.0.0, taosBenchmark and taosdump are no longer released together with TDengine.
By default, compiling TDengine does not include taos-tools. You can use 'cmake .. -DBUILD_TOOLS=true' to have them compiled together with TDengine.
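For reference, a minimal build sequence with taos-tools enabled might look like the following sketch (run from the TDengine source root; the `debug` build directory name just follows the convention used elsewhere in this document):
```bash
mkdir -p debug && cd debug
cmake .. -DBUILD_TOOLS=true
make
```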
To build the [taos-tools](https://github.com/taosdata/taos-tools) on Ubuntu/Debian, the following packages need to be installed. To build the [taos-tools](https://github.com/taosdata/taos-tools) on Ubuntu/Debian, the following packages need to be installed.
```bash ```bash
sudo apt install libjansson-dev libsnappy-dev liblzma-dev libz-dev pkg-config sudo apt install libjansson-dev libsnappy-dev liblzma-dev libz-dev pkg-config
...@@ -100,7 +103,7 @@ sudo dnf install -y maven ...@@ -100,7 +103,7 @@ sudo dnf install -y maven
#### Install build dependencies for taos-tools #### Install build dependencies for taos-tools
To build the [taos-tools](https://github.com/taosdata/taos-tools) on CentOS, the following packages need to be installed. To build the [taos-tools](https://github.com/taosdata/taos-tools) on CentOS, the following packages need to be installed.
```bash ```bash
sudo yum install libz-devel xz-devel snappy-devel jansson-devel pkgconfig libatomic sudo yum install zlib-devel xz-devel snappy-devel jansson-devel pkgconfig libatomic
``` ```
Note: Since snappy lacks pkg-config support (refer to [link](https://github.com/google/snappy/pull/86)), cmake may report that libsnappy cannot be found. However, snappy still works well.
...@@ -142,15 +145,15 @@ mkdir debug && cd debug ...@@ -142,15 +145,15 @@ mkdir debug && cd debug
cmake .. && cmake --build . cmake .. && cmake --build .
``` ```
Note that since TDengine 2.3.x.0, a component named 'taosAdapter' plays the http daemon role by default, instead of the http daemon embedded in earlier versions of TDengine. taosAdapter is written in Go. If you pull the latest TDengine source code into an existing codebase, please execute 'git submodule update --init --recursive' to pull the taosAdapter source code as well. Please install Go 1.14 or above to compile taosAdapter. If you meet difficulties regarding 'go mod', especially if you are in China, you can use a proxy to solve the problem.
``` ```
go env -w GO111MODULE=on go env -w GO111MODULE=on
go env -w GOPROXY=https://goproxy.cn,direct go env -w GOPROXY=https://goproxy.cn,direct
``` ```
Or you can use the following command to choose to build taosAdapter.
```
cmake .. -DBUILD_HTTP=false
```
You can use Jemalloc as memory allocator instead of glibc: You can use Jemalloc as memory allocator instead of glibc:
......
...@@ -136,24 +136,25 @@ IF (TD_ALPINE) ...@@ -136,24 +136,25 @@ IF (TD_ALPINE)
MESSAGE(STATUS "aplhine is defined") MESSAGE(STATUS "aplhine is defined")
ENDIF () ENDIF ()
IF ("${BUILD_HTTP}" STREQUAL "") IF ("${BUILD_HTTP}" STREQUAL "")
IF (TD_LINUX) IF (TD_LINUX)
IF (TD_ARM_32) IF (TD_ARM_32)
SET(BUILD_HTTP "true") SET(TD_BUILD_HTTP TRUE)
ELSE () ELSE ()
SET(BUILD_HTTP "false") SET(TD_BUILD_HTTP TRUE)
ENDIF () ENDIF ()
ELSEIF (TD_DARWIN) ELSEIF (TD_DARWIN)
SET(BUILD_HTTP "false") SET(TD_BUILD_HTTP TRUE)
ELSE () ELSE ()
SET(BUILD_HTTP "true") SET(TD_BUILD_HTTP TRUE)
ENDIF () ENDIF ()
ENDIF ()
IF (${BUILD_HTTP} MATCHES "true")
SET(TD_BUILD_HTTP TRUE)
ELSEIF (${BUILD_HTTP} MATCHES "false") ELSEIF (${BUILD_HTTP} MATCHES "false")
SET(TD_BUILD_HTTP FALSE) SET(TD_BUILD_HTTP FALSE)
ELSEIF (${BUILD_HTTP} MATCHES "true")
SET(TD_BUILD_HTTP TRUE)
ELSE ()
SET(TD_BUILD_HTTP TRUE)
ENDIF () ENDIF ()
IF (TD_BUILD_HTTP) IF (TD_BUILD_HTTP)
...@@ -167,7 +168,7 @@ IF ("${BUILD_TOOLS}" STREQUAL "") ...@@ -167,7 +168,7 @@ IF ("${BUILD_TOOLS}" STREQUAL "")
ELSEIF (TD_ARM_64) ELSEIF (TD_ARM_64)
SET(BUILD_TOOLS "false") SET(BUILD_TOOLS "false")
ELSE () ELSE ()
SET(BUILD_TOOLS "true") SET(BUILD_TOOLS "false")
ENDIF () ENDIF ()
ELSEIF (TD_DARWIN) ELSEIF (TD_DARWIN)
SET(BUILD_TOOLS "false") SET(BUILD_TOOLS "false")
......
...@@ -96,7 +96,7 @@ IF (${BUILD_JDBC} MATCHES "false") ...@@ -96,7 +96,7 @@ IF (${BUILD_JDBC} MATCHES "false")
SET(TD_BUILD_JDBC FALSE) SET(TD_BUILD_JDBC FALSE)
ENDIF () ENDIF ()
SET(TD_BUILD_HTTP FALSE) SET(TD_BUILD_HTTP TRUE)
SET(TD_TAOS_TOOLS TRUE) SET(TD_TAOS_TOOLS TRUE)
......
...@@ -364,7 +364,7 @@ taosdemo 不仅仅可以进行数据写入,也可以执行查询和订阅功 ...@@ -364,7 +364,7 @@ taosdemo 不仅仅可以进行数据写入,也可以执行查询和订阅功
} }
``` ```
以下为 JSON 文件中和查询相关的特有参数含义: 以下为 JSON 文件中和查询相关的特有参数含义:
```
"query_times": 每种查询类型的查询次数 "query_times": 每种查询类型的查询次数
"query_mode": 查询数据接口,"taosc":调用TDengine的c接口;“resetful”:使用restfule接口。可选项。缺省是“taosc”。 "query_mode": 查询数据接口,"taosc":调用TDengine的c接口;“resetful”:使用restfule接口。可选项。缺省是“taosc”。
"specified_table_query": { 指定表的查询 "specified_table_query": { 指定表的查询
...@@ -379,7 +379,7 @@ taosdemo 不仅仅可以进行数据写入,也可以执行查询和订阅功 ...@@ -379,7 +379,7 @@ taosdemo 不仅仅可以进行数据写入,也可以执行查询和订阅功
"threads": 并发执行sqls的线程数,可选项,缺省是1。每个线程负责一部分子表,执行所有的sqls。 "threads": 并发执行sqls的线程数,可选项,缺省是1。每个线程负责一部分子表,执行所有的sqls。
"sql": "select count(*) from xxxx"。查询超级表内所有子表的查询语句,其中表名必须写成 “xxxx”,实例会自动替换成子表名。 "sql": "select count(*) from xxxx"。查询超级表内所有子表的查询语句,其中表名必须写成 “xxxx”,实例会自动替换成子表名。
"result": 查询结果写入的文件名。可选项,缺省是空,表示查询结果不写入文件。 "result": 查询结果写入的文件名。可选项,缺省是空,表示查询结果不写入文件。
```
以下为一个典型订阅 JSON 示例文件内容: 以下为一个典型订阅 JSON 示例文件内容:
``` ```
...@@ -422,13 +422,13 @@ taosdemo 不仅仅可以进行数据写入,也可以执行查询和订阅功 ...@@ -422,13 +422,13 @@ taosdemo 不仅仅可以进行数据写入,也可以执行查询和订阅功
} }
``` ```
以下为订阅功能相关的特有参数含义: 以下为订阅功能相关的特有参数含义:
```
"interval": 执行订阅的间隔,单位是秒。可选项,缺省是0。 "interval": 执行订阅的间隔,单位是秒。可选项,缺省是0。
"restart": 订阅重启。"yes":如果订阅已经存在,重新开始,"no": 继续之前的订阅。(请注意执行用户需要对 dataDir 目录有读写权限) "restart": 订阅重启。"yes":如果订阅已经存在,重新开始,"no": 继续之前的订阅。(请注意执行用户需要对 dataDir 目录有读写权限)
"keepProgress": 保留订阅信息进度。yes表示保留订阅信息,no表示不保留。该值为yes,restart为no时,才能继续之前的订阅。 "keepProgress": 保留订阅信息进度。yes表示保留订阅信息,no表示不保留。该值为yes,restart为no时,才能继续之前的订阅。
"resubAfterConsume": 配合 keepProgress 使用,在订阅消费了相应次数后调用 unsubscribe 取消订阅并再次订阅。 "resubAfterConsume": 配合 keepProgress 使用,在订阅消费了相应次数后调用 unsubscribe 取消订阅并再次订阅。
"result": 查询结果写入的文件名。可选项,缺省是空,表示查询结果不写入文件。 注意:每条sql语句后的保存结果的文件不能重名,且生成结果文件时,文件名会附加线程号。 "result": 查询结果写入的文件名。可选项,缺省是空,表示查询结果不写入文件。 注意:每条sql语句后的保存结果的文件不能重名,且生成结果文件时,文件名会附加线程号。
```
结语 结语
-- --
TDengine是涛思数据专为物联网、车联网、工业互联网、IT运维等设计和优化的大数据平台。TDengine 由于数据库内核中创新的数据存储和查询引擎设计,展现出远超同类产品的高效性能。并且由于支持 SQL 语法和多种编程语言的连接器(目前支持 Java, Python, Go, C#, NodeJS, Rust 等),易用性极强,学习成本为零。为了便于运维需求,我们还提供数据迁移和监控功能等相关生态工具软件。 TDengine是涛思数据专为物联网、车联网、工业互联网、IT运维等设计和优化的大数据平台。TDengine 由于数据库内核中创新的数据存储和查询引擎设计,展现出远超同类产品的高效性能。并且由于支持 SQL 语法和多种编程语言的连接器(目前支持 Java, Python, Go, C#, NodeJS, Rust 等),易用性极强,学习成本为零。为了便于运维需求,我们还提供数据迁移和监控功能等相关生态工具软件。
...@@ -440,6 +440,7 @@ TDengine是涛思数据专为物联网、车联网、工业互联网、IT运维 ...@@ -440,6 +440,7 @@ TDengine是涛思数据专为物联网、车联网、工业互联网、IT运维
附录 - 完整 taosdemo 参数介绍 附录 - 完整 taosdemo 参数介绍
-- --
taosdemo支持两种配置参数的模式,一种是命令行参数,一种是使用 JSON 格式的配置文件。 taosdemo支持两种配置参数的模式,一种是命令行参数,一种是使用 JSON 格式的配置文件。
一、命令行参数 一、命令行参数
-f:指定taosdemo所需参数的meta文件。当使用该参数时,其他所有命令行参数都失效。可选项,缺省是NULL。目前仅支持不含 BOM(byte-order mark)的标准 UTF-8 编码文件。 -f:指定taosdemo所需参数的meta文件。当使用该参数时,其他所有命令行参数都失效。可选项,缺省是NULL。目前仅支持不含 BOM(byte-order mark)的标准 UTF-8 编码文件。
...@@ -508,8 +509,9 @@ taosdemo支持两种配置参数的模式,一种是命令行参数,一种是 ...@@ -508,8 +509,9 @@ taosdemo支持两种配置参数的模式,一种是命令行参数,一种是
二、JSON 格式的配置文件中所有参数说明 二、JSON 格式的配置文件中所有参数说明
taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个taosdemo实例不能同时支持三种功能,一个 taosdemo 实例只能支持其中的一种功能,通过配置文件来指定进行哪种功能的测试。 taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个taosdemo实例不能同时支持三种功能,一个 taosdemo 实例只能支持其中的一种功能,通过配置文件来指定进行哪种功能的测试。
1、插入功能测试的 JSON 配置文件
1、插入功能测试的 JSON 配置文件
```
{ {
"filetype": "insert", "filetype": "insert",
"cfgdir": "/etc/taos", "cfgdir": "/etc/taos",
...@@ -571,6 +573,7 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta ...@@ -571,6 +573,7 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta
}] }]
}] }]
} }
```
"filetype": 本taosdemo实例进行哪种功能测试。"insert"表示数据插入功能。必选项。 "filetype": 本taosdemo实例进行哪种功能测试。"insert"表示数据插入功能。必选项。
...@@ -600,7 +603,7 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta ...@@ -600,7 +603,7 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta
"databases": [{ "databases": [{
"dbinfo": {"name": 数据库名称。必选项。 "dbinfo": {"name": 数据库名称。必选项。
"drop": 如果数据库已经存在,”yes“:删除后重建;”no“:不删除,直接使用。可选项,缺省是”no“。drop = yes 会使其他子表创建相关条目无效。 "drop": 如果数据库已经存在,”yes“:删除后重建;”no“:不删除,直接使用。可选项,缺省是”no“。drop = yes 会使其他子表创建相关条目无效。
...@@ -695,8 +698,9 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta ...@@ -695,8 +698,9 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta
"count":该类型的连续列个数,可选项,缺省是1。 "count":该类型的连续列个数,可选项,缺省是1。
}] }]
2、查询功能测试的 JSON 配置文件
2、查询功能测试的 JSON 配置文件
```
{ {
"filetype": "query", "filetype": "query",
"cfgdir": "/etc/taos", "cfgdir": "/etc/taos",
...@@ -734,7 +738,7 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta ...@@ -734,7 +738,7 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta
] ]
} }
} }
```
"filetype": 本taosdemo实例进行哪种功能测试。"query"表示数据查询功能。必选项。 "filetype": 本taosdemo实例进行哪种功能测试。"query"表示数据查询功能。必选项。
...@@ -784,8 +788,9 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta ...@@ -784,8 +788,9 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta
注意:每条sql语句后的保存结果的文件不能重名,且生成结果文件时,文件名会附加线程号。 注意:每条sql语句后的保存结果的文件不能重名,且生成结果文件时,文件名会附加线程号。
查询结果显示:如果查询线程结束一次查询距开始执行时间超过30秒打印一次查询次数、用时和QPS。所有查询结束时,汇总打印总的查询次数和QPS。 查询结果显示:如果查询线程结束一次查询距开始执行时间超过30秒打印一次查询次数、用时和QPS。所有查询结束时,汇总打印总的查询次数和QPS。
3、订阅功能测试的 JSON 配置文件
3、订阅功能测试的 JSON 配置文件
```
{ {
"filetype":"subscribe", "filetype":"subscribe",
"cfgdir": "/etc/taos", "cfgdir": "/etc/taos",
...@@ -822,7 +827,8 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta ...@@ -822,7 +827,8 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta
"result": "./subscribe_res1.txt" "result": "./subscribe_res1.txt"
}] }]
} }
} }
```
"filetype": 本taosdemo实例进行哪种功能测试。"subscribe"表示数据查询功能。必选项。** "filetype": 本taosdemo实例进行哪种功能测试。"subscribe"表示数据查询功能。必选项。**
...@@ -878,4 +884,4 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta ...@@ -878,4 +884,4 @@ taosdemo支持3种功能的测试,包括插入、查询、订阅。但一个ta
"sql": " select count(*) from xxxx "。查询语句,其中表名必须写成 “xxxx”,实例会自动替换成子表名。 "sql": " select count(*) from xxxx "。查询语句,其中表名必须写成 “xxxx”,实例会自动替换成子表名。
"result": 查询结果写入的文件名。可选项,缺省是空,表示查询结果不写入文件。 注意:每条sql语句后的保存结果的文件不能重名,且生成结果文件时,文件名会附加线程号。 "result": 查询结果写入的文件名。可选项,缺省是空,表示查询结果不写入文件。 注意:每条sql语句后的保存结果的文件不能重名,且生成结果文件时,文件名会附加线程号。
...@@ -30,7 +30,7 @@ wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add - ...@@ -30,7 +30,7 @@ wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add -
echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-stable stable main" | sudo tee /etc/apt/sources.list.d/tdengine-stable.list echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-stable stable main" | sudo tee /etc/apt/sources.list.d/tdengine-stable.list
[ beta 版安装包仓库为可选安装项 ] echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-beta beta main" | sudo tee /etc/apt/sources.list.d/tdengine-beta.list [ beta 版安装包仓库为可选安装项 ] echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-beta beta main" | sudo tee /etc/apt/sources.list.d/tdengine-beta.list
sudo apt-get update sudo apt-get update
apt-get policy tdengine apt-cache policy tdengine
sudo apt-get install tdengine sudo apt-get install tdengine
``` ```
......
...@@ -319,118 +319,9 @@ taosAdapter 相关配置参数请参考 taosadapter --help 命令输出以及相 ...@@ -319,118 +319,9 @@ taosAdapter 相关配置参数请参考 taosadapter --help 命令输出以及相
## <a class="anchor" id="taosadapter2-telegraf"></a> 使用 Bailongma 2.0 接入 Telegraf 数据写入 ## <a class="anchor" id="taosadapter2-telegraf"></a> 使用 Bailongma 2.0 接入 Telegraf 数据写入
*注意:TDengine 新版本(2.3.0.0+)提供新版本 Bailongma ,命名为 taosAdapter ,提供更简便的 Telegraf 数据写入以及其他更强大的功能,Bailongma v2 即之前版本将逐步不再维护。 **注意:**
TDengine 新版本(2.3.0.0+)提供新版本 Bailongma ,命名为 taosAdapter ,提供更简便的 Telegraf 数据写入以及其他更强大的功能,Bailongma v2 及之前版本将逐步不再维护。
[Telegraf](https://www.influxdata.com/time-series-platform/telegraf/)是一流行的IT运维数据采集开源工具,TDengine提供一个小工具[Bailongma](https://github.com/taosdata/Bailongma),只需在Telegraf做简单配置,无需任何代码,就可将Telegraf采集的数据直接写入TDengine,并按规则在TDengine自动创建库和相关表项。博文[用Docker容器快速搭建一个Devops监控Demo](https://www.taosdata.com/blog/2020/02/03/1189.html)即是采用bailongma将Prometheus和Telegraf的数据写入TDengine中的示例,可以参考。
### 从源代码编译 blm_telegraf
用户需要从github下载[Bailongma](https://github.com/taosdata/Bailongma)的源码,使用Golang语言编译器编译生成可执行文件。在开始编译前,需要准备好以下条件:
- Linux操作系统的服务器
- 安装好Golang,1.10版本以上
- 对应的TDengine版本。因为用到了TDengine的客户端动态链接库,因此需要安装好和服务端相同版本的TDengine程序;比如服务端版本是TDengine 2.0.0, 则在Bailongma所在的Linux服务器(可以与TDengine在同一台服务器,或者不同服务器)
Bailongma项目中有一个文件夹blm_telegraf,存放了Telegraf的写入API程序。编译过程如下:
```bash
cd blm_telegraf
go build
```
一切正常的情况下,就会在对应的目录下生成一个blm_telegraf的可执行程序。
### 安装 Telegraf
目前TDengine支持Telegraf 1.7.4以上的版本。用户可以根据当前的操作系统,到Telegraf官网下载安装包,并执行安装。下载地址如下:https://portal.influxdata.com/downloads 。
### 配置 Telegraf
修改Telegraf配置文件/etc/telegraf/telegraf.conf中与TDengine有关的配置项。
在output plugins部分,增加[[outputs.http]]配置项:
- url:Bailongma API服务提供的URL,参考下面的启动示例章节
- data_format:"json"
- json_timestamp_units:"1ms"
在agent部分:
- hostname: 区分不同采集设备的机器名称,需确保其唯一性。
- metric_batch_size: 100,允许Telegraf每批次写入记录最大数量,增大其数量可以降低Telegraf的请求发送频率。
关于如何使用Telegraf采集数据以及更多有关使用Telegraf的信息,请参考Telegraf官方的[文档](https://docs.influxdata.com/telegraf/v1.11/)
### 启动 blm_telegraf 程序
blm_telegraf程序有以下选项,在启动blm_telegraf程序时可以通过设定这些选项来设定blm_telegraf的配置。
```bash
--host
TDengine服务端的IP地址,缺省值为空。
--batch-size
blm_telegraf会将收到的telegraf的数据拼装成TDengine的写入请求,这个参数控制一次发给TDengine的写入请求中携带的数据条数。
--dbname
设置在TDengine中创建的数据库名称,blm_telegraf会自动在TDengine中创建一个以dbname为名称的数据库,缺省值是prometheus。
--dbuser
设置访问TDengine的用户名,缺省值是'root'
--dbpassword
设置访问TDengine的密码,缺省值是'taosdata'
--port
blm_telegraf对telegraf提供服务的端口号。
```
### 启动示例
通过以下命令启动一个blm_telegraf的API服务:
```bash
./blm_telegraf -host 127.0.0.1 -port 8089
```
假设blm_telegraf所在服务器的IP地址为"10.1.2.3",则在telegraf的配置文件中, 在output plugins部分,增加[[outputs.http]]配置项:
```yaml
url = "http://10.1.2.3:8089/telegraf"
```
### 查询 telegraf 写入数据
telegraf产生的数据格式如下:
```json
{
"fields": {
"usage_guest": 0,
"usage_guest_nice": 0,
"usage_idle": 89.7897897897898,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 5.405405405405405,
"usage_user": 4.804804804804805
},
"name": "cpu",
"tags": {
"cpu": "cpu2",
"host": "bogon"
},
"timestamp": 1576464360
}
```
其中,name字段为telegraf采集的时序数据的名称,tags字段为该时序数据的标签。blm_telegraf会以时序数据的名称在TDengine中自动创建一个超级表,并将tags字段中的标签转换成TDengine的tag值,timestamp作为时间戳,fields字段中的值作为该时序数据的值。因此在TDengine的客户端中,可以通过以下指令查到这个数据是否成功写入。
```mysql
use telegraf;
select * from cpu;
```
## <a class="anchor" id="emq"></a>EMQ Broker 直接写入 ## <a class="anchor" id="emq"></a>EMQ Broker 直接写入
......
...@@ -145,7 +145,7 @@ taos> ...@@ -145,7 +145,7 @@ taos>
| **CPU类型** | x64(64bit) | | | ARM64 | ARM32 | | **CPU类型** | x64(64bit) | | | ARM64 | ARM32 |
| ------------ | ------------ | -------- | -------- | -------- | ---------- | | ------------ | ------------ | -------- | -------- | -------- | ---------- |
| **OS类型** | Linux | Win64 | Win32 | Linux | Linux | | **OS类型** | Linux | Win64 | Win32 | Linux | Linux |
| **支持与否** | **支持** | **支持** | **支持** | **支持** | **开发中** | | **支持与否** | **支持** | **支持** | **支持** | **支持** | **支持** |
C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine头文件 *taos.h*,里面列出了提供的API的函数原型。安装后,taos.h位于: C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine头文件 *taos.h*,里面列出了提供的API的函数原型。安装后,taos.h位于:
...@@ -208,6 +208,8 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine ...@@ -208,6 +208,8 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine
返回值为空表示失败。应用程序需要保存返回的参数,以便后续API调用。 返回值为空表示失败。应用程序需要保存返回的参数,以便后续API调用。
**提示:** 同一进程可以根据不同的host/port 连接多个taosd 集群
- `char *taos_get_server_info(TAOS *taos)` - `char *taos_get_server_info(TAOS *taos)`
获取服务端版本信息。 获取服务端版本信息。
......
...@@ -223,7 +223,6 @@ taosd -C ...@@ -223,7 +223,6 @@ taosd -C
| 105 | compressColData | | **S** | bytes | 客户端与服务器之间进行消息通讯过程中,对服务器端查询结果进行列压缩的阈值。 | 0: 对所有查询结果均进行压缩 >0: 查询结果中任意列大小超过该值的消息才进行压缩 -1: 不压缩 | -1 | 2.3.0.0 版本新增。 | | 105 | compressColData | | **S** | bytes | 客户端与服务器之间进行消息通讯过程中,对服务器端查询结果进行列压缩的阈值。 | 0: 对所有查询结果均进行压缩 >0: 查询结果中任意列大小超过该值的消息才进行压缩 -1: 不压缩 | -1 | 2.3.0.0 版本新增。 |
| 106 | tsdbMetaCompactRatio | | **C** | | tsdb meta文件中冗余数据超过多少阈值,开启meta文件的压缩功能 | 0:不开启,[1-100]:冗余数据比例 | 0 | | | 106 | tsdbMetaCompactRatio | | **C** | | tsdb meta文件中冗余数据超过多少阈值,开启meta文件的压缩功能 | 0:不开启,[1-100]:冗余数据比例 | 0 | |
| 107 | rpcForceTcp | | **SC**| | 强制使用TCP传输 | 0: 不开启 1: 开启 | 0 | 在网络比较差的环境中,建议开启。2.0版本新增。| | 107 | rpcForceTcp | | **SC**| | 强制使用TCP传输 | 0: 不开启 1: 开启 | 0 | 在网络比较差的环境中,建议开启。2.0版本新增。|
| 107 | rpcForceTcp | | **SC** | | 强制使用TCP传输。 | 0: 不开启 1: 开启 | 0 | 在网络比较差的环境中,建议开启。2.0 版本新增。 |
**注意:**对于端口,TDengine会使用从serverPort起13个连续的TCP和UDP端口号,请务必在防火墙打开。因此如果是缺省配置,需要打开从6030到6042共13个端口,而且必须TCP和UDP都打开。(详细的端口情况请参见 [TDengine 2.0 端口说明](https://www.taosdata.com/cn/documentation/faq#port) **注意:**对于端口,TDengine会使用从serverPort起13个连续的TCP和UDP端口号,请务必在防火墙打开。因此如果是缺省配置,需要打开从6030到6042共13个端口,而且必须TCP和UDP都打开。(详细的端口情况请参见 [TDengine 2.0 端口说明](https://www.taosdata.com/cn/documentation/faq#port)
...@@ -557,7 +556,7 @@ KILL STREAM <stream-id>; ...@@ -557,7 +556,7 @@ KILL STREAM <stream-id>;
TDengine启动后,会自动创建一个监测数据库log,并自动将服务器的CPU、内存、硬盘空间、带宽、请求数、磁盘读写速度、慢查询等信息定时写入该数据库。TDengine还将重要的系统操作(比如登录、创建、删除数据库等)日志以及各种错误报警信息记录下来存放在log库里。系统管理员可以从CLI直接查看这个数据库,也可以在WEB通过图形化界面查看这些监测信息。 TDengine启动后,会自动创建一个监测数据库log,并自动将服务器的CPU、内存、硬盘空间、带宽、请求数、磁盘读写速度、慢查询等信息定时写入该数据库。TDengine还将重要的系统操作(比如登录、创建、删除数据库等)日志以及各种错误报警信息记录下来存放在log库里。系统管理员可以从CLI直接查看这个数据库,也可以在WEB通过图形化界面查看这些监测信息。
这些监测信息的采集缺省是打开的,但可以修改配置文件里的选项enableMonitor将其关闭或打开。 这些监测信息的采集缺省是打开的,但可以修改配置文件里的选项monitor将其关闭或打开。
### TDinsight - 使用监控数据库 + Grafana 对 TDengine 进行监控的解决方案 ### TDinsight - 使用监控数据库 + Grafana 对 TDengine 进行监控的解决方案
......
...@@ -834,7 +834,7 @@ UNION ALL SELECT ... ...@@ -834,7 +834,7 @@ UNION ALL SELECT ...
[UNION ALL SELECT ...] [UNION ALL SELECT ...]
``` ```
TDengine 支持 UNION ALL 操作符。也就是说,如果多个 SELECT 子句返回结果集的结构完全相同(列名、列类型、列数、顺序),那么可以通过 UNION ALL 把这些结果集合并到一起。目前只支持 UNION ALL 模式,也即在结果集的合并过程中是不去重的。 TDengine 支持 UNION ALL 操作符。也就是说,如果多个 SELECT 子句返回结果集的结构完全相同(列名、列类型、列数、顺序),那么可以通过 UNION ALL 把这些结果集合并到一起。目前只支持 UNION ALL 模式,也即在结果集的合并过程中是不去重的。在同一个sql语句中,UNION ALL 最多支持100个。
### SQL 示例 ### SQL 示例
......
...@@ -113,7 +113,7 @@ TDengine is a highly efficient platform to store, query, and analyze time-series ...@@ -113,7 +113,7 @@ TDengine is a highly efficient platform to store, query, and analyze time-series
- [System Connection and Task Query Management](/administrator#status): show the system connections, queries, streaming calculation and others - [System Connection and Task Query Management](/administrator#status): show the system connections, queries, streaming calculation and others
- [System Monitor](/administrator#monitoring): monitor TDengine cluster with log database and TDinsight. - [System Monitor](/administrator#monitoring): monitor TDengine cluster with log database and TDinsight.
- [File Directory Structure](/administrator#directories): directories where TDengine data files and configuration files located - [File Directory Structure](/administrator#directories): directories where TDengine data files and configuration files located
- [Parameter Limits and Reserved Keywords](/administrator#keywords): TDengine’s list of parameter limits and reserved keywords
## Performance: TDengine vs Others ## Performance: TDengine vs Others
......
Since TDengine was open sourced in July 2019, it has gained a lot of popularity among time-series database developers with its innovative data modeling design, simple installation method, easy programming interface, and powerful data insertion and query performance. The insertion and querying performance is often astonishing to users who are new to TDengine. In order to help users experience the high performance and functions of TDengine in the shortest time, we developed an application called taosdemo for insertion and querying performance testing of TDengine. With it, users can easily simulate the scenario of a large number of devices generating a very large amount of data, and can easily manipulate the number of columns, data types, disorder ratio, and number of concurrent threads with taosdemo's customizable parameters.
Running taosdemo is very simple. Just download the TDengine installation package (https://www.taosdata.com/cn/all-downloads/) or compile the TDengine code yourself (https://github.com/taosdata/TDengine). taosdemo can then be found and run in the installation directory or in the build output directory.
...@@ -221,7 +221,7 @@ To reach TDengine performance limits, data insertion can be executed by using mu ...@@ -221,7 +221,7 @@ To reach TDengine performance limits, data insertion can be executed by using mu
```
-t, --tables=NUMBER The number of tables. Default is 10000.
-n, --records=NUMBER The number of records per table. Default is 10000.
-M, --random The value of records generated are totally random. The default is to simulate power equipment scenario.
```
As mentioned earlier, taosdemo creates 10,000 tables by default, and each table writes 10,000 records. taosdemo can set the number of tables and the number of records in each table by -t and -n. The data generated by default without parameters are simulated real scenarios, and the simulated data are current and voltage phase values with certain jitter, which can more realistically show TDengine's efficient data compression ability. If you need to simulate the generation of completely random data, you can pass the -M parameter.
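For example, a run that overrides these defaults and writes fully random data might look like the following sketch (the table and record counts here are arbitrary):
```bash
# 1,000 tables, 100,000 records per table, fully random values
taosdemo -t 1000 -n 100000 -M
```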
``` ```
...@@ -374,7 +374,7 @@ The following is the content of a typical query JSON example file. ...@@ -374,7 +374,7 @@ The following is the content of a typical query JSON example file.
} }
``` ```
The following parameters are specific to the query in the JSON file.
```
"query_times": the number of queries per query type
"query_mode": query data interface, "taosc": call TDengine's C interface; "restful": use the restful interface. Optional. Default is "taosc".
"specified_table_query": { query for the specified table
...@@ -389,7 +389,7 @@ The following parameters are specific to the query in the JSON file. ...@@ -389,7 +389,7 @@ The following parameters are specific to the query in the JSON file.
"threads": the number of threads to execute sqls concurrently, optional, default is 1. Each thread is responsible for a part of sub-tables and executes all sqls. "threads": the number of threads to execute sqls concurrently, optional, default is 1. Each thread is responsible for a part of sub-tables and executes all sqls.
"sql": "select count(*) from xxxx". Query statement for all sub-tables in the super table, where the table name must be written as "xxxx" and the instance will be replaced with the sub-table name automatically. "sql": "select count(*) from xxxx". Query statement for all sub-tables in the super table, where the table name must be written as "xxxx" and the instance will be replaced with the sub-table name automatically.
"result": the name of the file to which the query result is written. Optional, the default is null, which means the query results are not written to a file. "result": the name of the file to which the query result is written. Optional, the default is null, which means the query results are not written to a file.
```
The following is a typical subscription JSON example file content.
``` ```
...@@ -432,13 +432,13 @@ The following is a typical subscription JSON example file content. ...@@ -432,13 +432,13 @@ The following is a typical subscription JSON example file content.
} }
``` ```
The following are the meanings of the parameters specific to the subscription function.
```
"interval": interval for executing subscriptions, in seconds. Optional, default is 0.
"restart": subscription restart. "yes": restart the subscription if it already exists, "no": continue the previous subscription. (Please note that the executing user needs to have read/write access to the dataDir directory)
"keepProgress": keep the progress of the subscription information. yes means keep the subscription information, no means don't keep it. The value is yes and restart is no to continue the previous subscriptions.
"resubAfterConsume": Used in conjunction with keepProgress to call unsubscribe after the subscription has been consumed the appropriate number of times and to subscribe again.
"result": the name of the file to which the query result is written. Optional, default is null, means the query result will not be written to the file. Note: The file to save the result after each sql statement cannot be renamed, and the file name will be appended with the thread number when generating the result file.
```
Conclusion
--
TDengine is a big data platform designed and optimized for IoT, Telematics, Industrial Internet, DevOps, etc. TDengine shows a high performance that far exceeds similar products due to the innovative data storage and query engine design in the database kernel. And with SQL syntax support and connectors for multiple programming languages (currently Java, Python, Go, C#, NodeJS, Rust, etc. are supported), it is extremely easy to use and has zero learning cost. To facilitate operation and maintenance needs, we also provide data migration and monitoring functions and other related ecological tools and software.
......
...@@ -193,7 +193,7 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc ...@@ -193,7 +193,7 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc
**Redirection**: No matter about dnode or TAOSC, the connection to the mnode shall be initiated first, but the mnode is automatically created and maintained by the system, so the user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Because any running dnode maintains the currently running mnode EP List, when receiving a connecting request from a newly started dnode or TAOSC, if it is not an mnode itself, it will reply with the mnode EP List. After receiving this list, TAOSC or the newly started dnode will try to establish the connection again. When the mnode EP List changes, each data node quickly obtains the latest list and notifies TAOSC through messaging interaction among nodes.
### A Typical Data Writing Process
To explain the relationship between vnode, mnode, TAOSC and application and their respective roles, the following is an analysis of a typical data writing process.
...@@ -244,7 +244,7 @@ The meta data of each table (including schema, tags, etc.) is also stored in vno ...@@ -244,7 +244,7 @@ The meta data of each table (including schema, tags, etc.) is also stored in vno
### Data Partitioning

In addition to vnode sharding, TDengine partitions the time-series data by time range. Each data file contains only one time range of time-series data, and the length of the time range is determined by the DB configuration parameter `“days”`. This method of partitioning by time range also makes it convenient to implement the data retention policy efficiently: as long as a data file exceeds the specified number of days (system configuration parameter `“keep”`), it is automatically deleted. Moreover, different time ranges can be stored in different paths and on different storage media, so as to facilitate tiered storage. Cold/hot data can be stored on different storage media to reduce the storage cost.
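For illustration, both parameters can be set when a database is created; the sketch below assumes a hypothetical database name and arbitrary values (a 10-day file time range, one-year retention):
```bash
taos -s "CREATE DATABASE power DAYS 10 KEEP 365;"
```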
In general, **TDengine splits big data by vnode and time range in two dimensions** to manage the data efficiently with horizontal scalability.
......
# Efficient Data Writing

TDengine supports multiple ways to write data, including SQL, Prometheus, Telegraf, collectd, StatsD, EMQ MQTT Broker, HiveMQ Broker, CSV file, etc. Kafka, OPC and other interfaces will be provided in the future. Data can be inserted in one single record or in batches, data from one or multiple data collection points can be inserted at the same time. TDengine supports multi-thread insertion, out-of-order data insertion, and also historical data insertion.

## <a class="anchor" id="sql"></a> Data Writing via SQL
...@@ -107,11 +107,10 @@ Launch an API service for blm_prometheus with the following command: ...@@ -107,11 +107,10 @@ Launch an API service for blm_prometheus with the following command:
Assuming that the IP address of the server where blm_prometheus is located is "10.1.2.3", the URL shall be added to the configuration file of Prometheus as:
```yaml
remote_write:
  - url: "http://10.1.2.3:8088/receive"
```
### Query written data of prometheus
...@@ -142,141 +141,84 @@ use prometheus; ...@@ -142,141 +141,84 @@ use prometheus;
select * from apiserver_request_latencies_bucket; select * from apiserver_request_latencies_bucket;
``` ```
## <a class="anchor" id="telegraf"></a> Data Writing via Telegraf and taosAdapter
Please refer to [Official document](https://portal.influxdata.com/downloads/) for Telegraf installation.
TDengine version 2.3.0.0+ includes a stand-alone application taosAdapter in charge of receive data insertion from Telegraf.
## <a class="anchor" id="telegraf"></a> Data Writing via Telegraf Configuration:
Please add following words in /etc/telegraf/telegraf.conf. Fill 'database name' with the database name you want to store in the TDengine for Telegraf data. Please fill the values in TDengine server/cluster host, username and password fields.
[Telegraf](https://www.influxdata.com/time-series-platform/telegraf/) is a popular open source tool for IT operation data collection. TDengine provides a simple tool [Bailongma](https://github.com/taosdata/Bailongma), which only needs to be simply configured in Telegraf without any code, and can directly write the data collected by Telegraf into TDengine, then automatically create databases and related table entries in TDengine according to rules. Blog post [Use Docker Container to Quickly Build a Devops Monitoring Demo](https://www.taosdata.com/blog/2020/02/03/1189.html), which is an example of using bailongma to write Prometheus and Telegraf data into TDengine. ```
[[outputs.http]]
### Compile blm_telegraf From Source Code url = "http://<TDengine server/cluster host>:6041/influxdb/v1/write?db=<database name>"
method = "POST"
Users need to download the source code of [Bailongma](https://github.com/taosdata/Bailongma) from github, then compile and generate an executable file using Golang language compiler. Before you start compiling, you need to complete following prepares: timeout = "5s"
username = "<TDengine's username>"
- A server running Linux OS password = "<TDengine's password>"
- Golang version 1.10 and higher installed data_format = "influx"
- An appropriated TDengine version. Because the client dynamic link library of TDengine is used, it is necessary to install the same version of TDengine as the server-side; for example, if the server version is TDengine 2.0. 0, ensure install the same version on the linux server where bailongma is located (can be on the same server as TDengine, or on a different server) influx_max_line_bytes = 250
Bailongma project has a folder, blm_telegraf, which holds the Telegraf writing API. The compiling process is as follows:
```bash
cd blm_telegraf
go build
``` ```
If everything goes well, an executable of blm_telegraf will be generated in the corresponding directory. Then restart telegraf:
```
### Install Telegraf sudo systemctl start telegraf
```
At the moment, TDengine supports Telegraf version 1.7.4 and above. Users can download the installation package from Telegraf's website according to your current operating system. The download address is as follows: https://portal.influxdata.com/downloads Now you can query the metrics data of Telegraf from TDengine.
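As a quick check, the following sketch assumes `db=telegraf` was used in the `outputs.http` URL above and that Telegraf's default `cpu` input is enabled; both names are assumptions and should be adjusted to your setup.

```bash
# Verify that the database exists, then peek at one measurement (super table).
taos -s "show databases;"
taos -s "select * from telegraf.cpu limit 5;"
```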
### Configure Telegraf
Modify the TDengine-related configurations in the Telegraf configuration file /etc/telegraf/telegraf.conf.
In the output plugins section, add the [[outputs.http]] configuration:
- url: The URL provided by bailongma API service, please refer to the example section below
- data_format: "json"
- json_timestamp_units: "1ms"
In agent section:
- hostname: The machine name that distinguishes different collection devices, and it is necessary to ensure its uniqueness
- metric_batch_size: 100, the maximum number of records per batch that Telegraf is allowed to write. Increasing the number can reduce the request sending frequency of Telegraf.
For information on how to use Telegraf to collect data and more about using Telegraf, please refer to the official [document](https://docs.influxdata.com/telegraf/v1.11/) of Telegraf.
### Launch blm_telegraf
blm_telegraf has the following options, which can be set to tune the configuration of blm_telegraf when launching.
```sh
--host
The ip address of TDengine server, default is null
--batch-size
blm_telegraf assembles the received Telegraf data into a TDengine write request. This parameter controls the number of data records carried in a single write request sent to TDengine.
--dbname
Set a name for the database created in TDengine; blm_telegraf will automatically create a database named dbname in TDengine. The default value is prometheus.
--dbuser
Set the user name to access TDengine; the default value is 'root'.
--dbpassword Please find taosAdapter configuration and usage from `taosadapter --help` output.
Set the password to access TDengine; the default value is 'taosdata'. ## <a class="anchor" id="collectd"></a> Data Writing via collectd (through taosAdapter)
Please refer to [official document](https://collectd.org/download.shtml) for collectd installation.
--port TDengine version 2.3.0.0+ includes a stand-alone application taosAdapter in charge of receiving data insertion from collectd.
The port number blm_telegraf uses to serve Telegraf. Configuration:
Please add the following lines to /etc/collectd/collectd.conf. Please fill in the 'host' and 'port' values with those used by TDengine and taosAdapter.
``` ```
LoadPlugin network
<Plugin network>
Server "<TDengine cluster/server host>" "<port for collectd>"
</Plugin>
```
Then restart collectd
```
sudo systemctl start collectd
```
Please find taosAdapter configuration and usage from `taosadapter --help` output.
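A minimal sketch for verifying the ingestion; the database name `collectd` is only an assumption here and depends on the taosAdapter configuration, as do the super tables created from your enabled collectd plugins.

```bash
# Confirm the target database exists, then list the super tables created for collectd metrics.
taos -s "show databases;"
taos -s "use collectd; show stables;"
```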
## <a class="anchor" id="statsd"></a> Data Writing via StatsD (through taosAdapter)
Please refer to [official document](https://github.com/statsd/statsd) for StatsD installation.
TDengine version 2.3.0.0+ includes a stand-alone application taosAdapter in charge of receiving data insertion from StatsD.
### Example Please add the following lines to the config.js file. Please fill in the 'host' and 'port' values with those used by TDengine and taosAdapter.
Launch an API service for blm_telegraf with the following command
```bash
./blm_telegraf -host 127.0.0.1 -port 8089
``` ```
add "./backends/repeater" to backends section.
Assuming that the IP address of the server where blm_telegraf located is "10.1.2. 3", the URL shall be added to the configuration file of telegraf as: add { host:'<TDengine server/cluster host>', port: <port for StatsD>} to repeater section.
```yaml
url = "http://10.1.2.3:8089/telegraf"
``` ```
### Query written data of telegraf Example file:
```
The format of the data generated by telegraf is as follows:
```json
{ {
"fields": { port: 8125
"usage_guest": 0, , backends: ["./backends/repeater"]
"usage_guest_nice": 0, , repeater: [{ host: '127.0.0.1', port: 6044}]
"usage_idle": 89.7897897897898,
"usage_iowait": 0,
"usage_irq": 0,
"usage_nice": 0,
"usage_softirq": 0,
"usage_steal": 0,
"usage_system": 5.405405405405405,
"usage_user": 4.804804804804805
},
"name": "cpu",
"tags": {
"cpu": "cpu2",
"host": "bogon"
},
"timestamp": 1576464360
} }
``` ```
Here, the name field is the name of the time-series data collected by telegraf, and the tag field is the tag of the time-series data. blm_telegraf automatically creates a STable in TDengine named after the time-series data, converts the tag field into TDengine tag values, and uses timestamp as the timestamp and the fields values as the values of the time-series data. Therefore, in the TDengine client, you can check whether this data was successfully written with the following instruction. Please find taosAdapter configuration and usage from `taosadapter --help` output.
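As a quick smoke test of the StatsD path, assuming StatsD listens on the default UDP port 8125 and `nc` (netcat) is installed; the metric name is made up for the example.

```bash
# Send one test counter to StatsD; the repeater backend configured above should
# forward it to taosAdapter, and it will then show up in TDengine.
echo "foo:1|c" | nc -u -w0 127.0.0.1 8125
```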
```mysql
use telegraf;
select * from cpu; ## <a class="anchor" id="taosadapter2-telegraf"></a> Insert data via Bailongma 2.0 and Telegraf
```
**Notice:**
TDengine 2.3.0.0+ provides taosAdapter to support Telegraf data writing. Bailongma v2 will be abandoned and no longer maintained.
MQTT is a popular data transmission protocol in the IoT. TDengine can easily access the data received by MQTT Broker and write it to TDengine.
## <a class="anchor" id="emq"></a> Data Writing via EMQ Broker ## <a class="anchor" id="emq"></a> Data Writing via EMQ Broker
[EMQ](https://github.com/emqx/emqx) is an open source MQTT Broker software, with no need of coding, only to use "rules" in EMQ Dashboard for simple configuration, and MQTT data can be directly written into TDengine. EMQ X supports storing data to the TDengine by sending it to a Web service, and also provides a native TDengine driver on Enterprise Edition for direct data store. Please refer to [EMQ official documents](https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine) for more details. [EMQ](https://github.com/emqx/emqx) is an open source MQTT Broker software, with no need of coding, only to use "rules" in EMQ Dashboard for simple configuration, and MQTT data can be directly written into TDengine. EMQ X supports storing data to the TDengine by sending it to a Web service, and also provides a native TDengine driver on Enterprise Edition for direct data store. Please refer to [EMQ official documents](https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine) for more details.
## <a class="anchor" id="hivemq"></a> Data Writing via HiveMQ Broker ## <a class="anchor" id="hivemq"></a> Data Writing via HiveMQ Broker
[HiveMQ](https://www.hivemq.com/) is an MQTT agent that provides Free Personal and Enterprise Edition versions. It is mainly used for enterprises, emerging machine-to-machine(M2M) communication and internal transmission to meet scalability, easy management and security features. HiveMQ provides an open source plug-in development kit. You can store data to TDengine via HiveMQ extension-TDengine. Refer to the [HiveMQ extension-TDengine documentation](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README.md) for more details. [HiveMQ](https://www.hivemq.com/) is an MQTT agent that provides Free Personal and Enterprise Edition versions. It is mainly used for enterprises, emerging machine-to-machine(M2M) communication and internal transmission to meet scalability, easy management and security features. HiveMQ provides an open source plug-in development kit. You can store data to TDengine via HiveMQ extension-TDengine. Refer to the [HiveMQ extension-TDengine documentation](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README.md) for more details.
\ No newline at end of file
...@@ -11,7 +11,7 @@ TDengine uses SQL as the query language. Applications can send SQL statements th ...@@ -11,7 +11,7 @@ TDengine uses SQL as the query language. Applications can send SQL statements th
- Time stamp aligned join query (implicit join) operations - Time stamp aligned join query (implicit join) operations
- Multiple aggregation/calculation functions: count, max, min, avg, sum, twa, stddev, leastsquares, top, bottom, first, last, percentile, apercentile, last_row, spread, diff, etc - Multiple aggregation/calculation functions: count, max, min, avg, sum, twa, stddev, leastsquares, top, bottom, first, last, percentile, apercentile, last_row, spread, diff, etc
For example, in TAOS shell, the records with vlotage > 215 are queried from table d1001, sorted in descending order by timestamps, and only two records are outputted. For example, in TAOS shell, the records with voltage > 215 are queried from table d1001, sorted in descending order by timestamps, and only two records are outputted.
```mysql ```mysql
taos> select * from d1001 where voltage > 215 order by ts desc limit 2; taos> select * from d1001 where voltage > 215 order by ts desc limit 2;
......
...@@ -110,10 +110,10 @@ First, use `taos_subscribe` to create a subscription: ...@@ -110,10 +110,10 @@ First, use `taos_subscribe` to create a subscription:
```c ```c
TAOS_SUB* tsub = NULL; TAOS_SUB* tsub = NULL;
if (async) { if (async) {
  // create an asynchronized subscription, the callback function will be called every 1s   // create an asynchronous subscription, the callback function will be called every 1s
  tsub = taos_subscribe(taos, restart, topic, sql, subscribe_callback, &blockFetch, 1000);   tsub = taos_subscribe(taos, restart, topic, sql, subscribe_callback, &blockFetch, 1000);
} else { } else {
  // create an synchronized subscription, need to call 'taos_consume' manually   // create an synchronous subscription, need to call 'taos_consume' manually
  tsub = taos_subscribe(taos, restart, topic, sql, NULL, NULL, 0);   tsub = taos_subscribe(taos, restart, topic, sql, NULL, NULL, 0);
} }
``` ```
...@@ -201,7 +201,7 @@ taos_unsubscribe(tsub, keep); ...@@ -201,7 +201,7 @@ taos_unsubscribe(tsub, keep);
Its second parameter is used to decide whether to keep the progress information of subscription on the client. If this parameter is **false** (zero), the subscription can only be restarted no matter what the `restart` parameter is when `taos_subscribe` is called next time. In addition, progress information is saved in the directory {DataDir}/subscribe/. Each subscription has a file with the same name as its `topic`. Deleting a file will also lead to a new start when the corresponding subscription is created next time. Its second parameter is used to decide whether to keep the progress information of subscription on the client. If this parameter is **false** (zero), the subscription can only be restarted no matter what the `restart` parameter is when `taos_subscribe` is called next time. In addition, progress information is saved in the directory {DataDir}/subscribe/. Each subscription has a file with the same name as its `topic`. Deleting a file will also lead to a new start when the corresponding subscription is created next time.
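A hedged shell illustration of the progress files mentioned above, assuming the default data directory /var/lib/taos; the topic name is a placeholder.

```bash
# Progress files live under {DataDir}/subscribe/, one file per topic.
ls /var/lib/taos/subscribe/
# Deleting a topic's file forces the next subscription with that topic to start
# from scratch, the same effect as not keeping progress on unsubscribe.
rm /var/lib/taos/subscribe/my-topic   # "my-topic" is a hypothetical topic name
```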
After introducing the code, let's take a look at the actual running effect. For exmaple: After introducing the code, let's take a look at the actual running effect. For example:
- Sample code has been downloaded locally - Sample code has been downloaded locally
- TDengine has been installed on the same machine - TDengine has been installed on the same machine
......
...@@ -54,33 +54,33 @@ INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('beijing') VALUES( ...@@ -54,33 +54,33 @@ INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('beijing') VALUES(
## JDBC driver version and supported TDengine and JDK versions ## JDBC driver version and supported TDengine and JDK versions
| taos-jdbcdriver | TDengine | JDK | | taos-jdbcdriver | TDengine | JDK |
| -------------------- | ----------------- | -------- | | --------------- | ------------------ | ----- |
| 2.0.33 - 2.0.34 | 2.0.3.0 and above | 1.8.x | | 2.0.33 - 2.0.34 | 2.0.3.0 and above | 1.8.x |
| 2.0.31 - 2.0.32 | 2.1.3.0 and above | 1.8.x | | 2.0.31 - 2.0.32 | 2.1.3.0 and above | 1.8.x |
| 2.0.22 - 2.0.30 | 2.0.18.0 - 2.1.2.x | 1.8.x | | 2.0.22 - 2.0.30 | 2.0.18.0 - 2.1.2.x | 1.8.x |
| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.x | 1.8.x | | 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.x | 1.8.x |
| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x | | 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x |
| 1.0.3 | 1.6.1.x and above | 1.8.x | | 1.0.3 | 1.6.1.x and above | 1.8.x |
| 1.0.2 | 1.6.1.x and above | 1.8.x | | 1.0.2 | 1.6.1.x and above | 1.8.x |
| 1.0.1 | 1.6.1.x and above | 1.8.x | | 1.0.1 | 1.6.1.x and above | 1.8.x |
## DataType in TDengine and Java connector ## DataType in TDengine and Java connector
The TDengine supports the following data types and Java data types: The TDengine supports the following data types and Java data types:
| TDengine DataType | JDBCType (driver version < 2.0.24) | JDBCType (driver version >= 2.0.24) | | TDengine DataType | JDBCType (driver version < 2.0.24) | JDBCType (driver version >= 2.0.24) |
| ----------------- | ------------------ | ------------------ | | ----------------- | ---------------------------------- | ----------------------------------- |
| TIMESTAMP | java.lang.Long | java.sql.Timestamp | | TIMESTAMP | java.lang.Long | java.sql.Timestamp |
| INT | java.lang.Integer | java.lang.Integer | | INT | java.lang.Integer | java.lang.Integer |
| BIGINT | java.lang.Long | java.lang.Long | | BIGINT | java.lang.Long | java.lang.Long |
| FLOAT | java.lang.Float | java.lang.Float | | FLOAT | java.lang.Float | java.lang.Float |
| DOUBLE | java.lang.Double | java.lang.Double | | DOUBLE | java.lang.Double | java.lang.Double |
| SMALLINT | java.lang.Short | java.lang.Short | | SMALLINT | java.lang.Short | java.lang.Short |
| TINYINT | java.lang.Byte | java.lang.Byte | | TINYINT | java.lang.Byte | java.lang.Byte |
| BOOL | java.lang.Boolean | java.lang.Boolean | | BOOL | java.lang.Boolean | java.lang.Boolean |
| BINARY | java.lang.String | byte array | | BINARY | java.lang.String | byte array |
| NCHAR | java.lang.String | java.lang.String | | NCHAR | java.lang.String | java.lang.String |
## Install Java connector ## Install Java connector
...@@ -448,7 +448,7 @@ public static void main(String[] args) throws SQLException { ...@@ -448,7 +448,7 @@ public static void main(String[] args) throws SQLException {
config.setMinimumIdle(10); //minimum number of idle connection config.setMinimumIdle(10); //minimum number of idle connection
config.setMaximumPoolSize(10); //maximum number of connection in the pool config.setMaximumPoolSize(10); //maximum number of connection in the pool
config.setConnectionTimeout(30000); //maximum wait milliseconds for get connection from pool config.setConnectionTimeout(30000); //maximum wait milliseconds for get connection from pool
config.setMaxLifetime(0); // maximum life time for each connection config.setMaxLifetime(0); // maximum lifetime for each connection
config.setIdleTimeout(0); // max idle time for recycle idle connection config.setIdleTimeout(0); // max idle time for recycle idle connection
config.setConnectionTestQuery("select server_status()"); //validation query config.setConnectionTestQuery("select server_status()"); //validation query
HikariDataSource ds = new HikariDataSource(config); //create datasource HikariDataSource ds = new HikariDataSource(config); //create datasource
...@@ -456,7 +456,7 @@ public static void main(String[] args) throws SQLException { ...@@ -456,7 +456,7 @@ public static void main(String[] args) throws SQLException {
Statement statement = connection.createStatement(); // get statement Statement statement = connection.createStatement(); // get statement
//query or insert //query or insert
// ... // ...
connection.close(); // put back to conneciton pool connection.close(); // put back to connection pool
} }
``` ```
...@@ -480,7 +480,7 @@ public static void main(String[] args) throws Exception { ...@@ -480,7 +480,7 @@ public static void main(String[] args) throws Exception {
Statement statement = connection.createStatement(); // get statement Statement statement = connection.createStatement(); // get statement
//query or insert //query or insert
// ... // ...
connection.close(); // put back to conneciton pool connection.close(); // put back to connection pool
} }
``` ```
......
...@@ -15,7 +15,7 @@ if you use the default features, it'll depend on: ...@@ -15,7 +15,7 @@ if you use the default features, it'll depend on:
- [TDengine Client](https://www.taosdata.com/cn/getting-started/#%E9%80%9A%E8%BF%87%E5%AE%89%E8%A3%85%E5%8C%85%E5%AE%89%E8%A3%85) library and headers. - [TDengine Client](https://www.taosdata.com/cn/getting-started/#%E9%80%9A%E8%BF%87%E5%AE%89%E8%A3%85%E5%8C%85%E5%AE%89%E8%A3%85) library and headers.
- clang, because bindgen requires the clang AST library. - clang, because bindgen requires the clang AST library.
## Fetures ## Features
In-design features: In-design features:
......
...@@ -154,7 +154,7 @@ Under cmd, enter the c:\TDengine directory and directly execute taos.exe, and yo ...@@ -154,7 +154,7 @@ Under cmd, enter the c:\TDengine directory and directly execute taos.exe, and yo
| **CPU Type** | **x64****(****64bit****)** | | | **ARM64** | **ARM32** | | **CPU Type** | **x64****(****64bit****)** | | | **ARM64** | **ARM32** |
| -------------------- | ---------------------------- | ------- | ------- | --------- | ------------------ | | -------------------- | ---------------------------- | ------- | ------- | --------- | ------------------ |
| **OS Type** | Linux | Win64 | Win32 | Linux | Linux | | **OS Type** | Linux | Win64 | Win32 | Linux | Linux |
| **Supported or Not** | Yes | **Yes** | **Yes** | **Yes** | **In development** | | **Supported or Not** | Yes | **Yes** | **Yes** | **Yes** | **Yes** |
The C/C++ API is similar to MySQL's C API. When an application uses it, it needs to include the TDengine header file taos.h (after installation, it is located in /usr/local/taos/include): The C/C++ API is similar to MySQL's C API. When an application uses it, it needs to include the TDengine header file taos.h (after installation, it is located in /usr/local/taos/include):
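For reference, a typical build command for a C program that includes taos.h might look like the sketch below; it assumes the TDengine client package is installed and libtaos is visible on the default library path.

```bash
# demo.c includes <taos.h>; the include path matches the install location above.
gcc -o demo demo.c -I/usr/local/taos/include -ltaos
```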
...@@ -200,6 +200,8 @@ Create a database connection and initialize the connection context. The paramete ...@@ -200,6 +200,8 @@ Create a database connection and initialize the connection context. The paramete
* port: Port number * port: Port number
A null return value indicates a failure. The application needs to save the returned parameters for subsequent API calls. A null return value indicates a failure. The application needs to save the returned parameters for subsequent API calls.
Note: The same process can connect to multiple taosd processes based on ip/port
- `char *taos_get_server_info(TAOS *taos)` - `char *taos_get_server_info(TAOS *taos)`
...@@ -884,7 +886,7 @@ dotnet new console ...@@ -884,7 +886,7 @@ dotnet new console
``` cmd ``` cmd
dotnet add package TDengine.Connector dotnet add package TDengine.Connector
``` ```
* inlucde the TDnengineDriver in you application's namespace * include the TDengineDriver in your application's namespace
```C# ```C#
using TDengineDriver; using TDengineDriver;
``` ```
......
...@@ -228,7 +228,7 @@ Note: In 2.0.15.0 and later versions, STABLE reserved words are supported. That ...@@ -228,7 +228,7 @@ Note: In 2.0.15.0 and later versions, STABLE reserved words are supported. That
```mysql ```mysql
CREATE STABLE [IF NOT EXISTS] stb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]) TAGS (tag1_name tag_type1, tag2_name tag_type2 [, tag3_name tag_type3]); CREATE STABLE [IF NOT EXISTS] stb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]) TAGS (tag1_name tag_type1, tag2_name tag_type2 [, tag3_name tag_type3]);
``` ```
Similiar to a standard table creation SQL, but you need to specify name and type of TAGS field. Similar to a standard table creation SQL, but you need to specify name and type of TAGS field.
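A concrete sketch of the statement above, executed through the taos shell; the database, column and tag names are made up for illustration.

```bash
# Create a demo database and a super table with two tag columns.
taos -s "CREATE DATABASE IF NOT EXISTS demo; CREATE STABLE IF NOT EXISTS demo.meters (ts TIMESTAMP, current FLOAT, voltage INT) TAGS (location BINARY(64), groupId INT);"
```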
Note: Note:
...@@ -673,7 +673,7 @@ Query OK, 1 row(s) in set (0.001091s) ...@@ -673,7 +673,7 @@ Query OK, 1 row(s) in set (0.001091s)
SELECT * FROM tb1 WHERE ts >= NOW - 1h; SELECT * FROM tb1 WHERE ts >= NOW - 1h;
``` ```
- Look up table tb1 from 2018-06-01 08:00:00. 000 to 2018-06-02 08:00:00. 000, and col3 string is a record ending in'nny ', and the result is in descending order of timestamp: - Look up table tb1 from 2018-06-01 08:00:00.000 to 2018-06-02 08:00:00.000, where the col3 string ends in 'nny', and the result is in descending order of timestamp:
```mysql ```mysql
SELECT * FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' AND ts <= '2018-06-02 08:00:00.000' AND col3 LIKE '%nny' ORDER BY ts DESC; SELECT * FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' AND ts <= '2018-06-02 08:00:00.000' AND col3 LIKE '%nny' ORDER BY ts DESC;
...@@ -782,7 +782,7 @@ TDengine supports aggregations over data, they are listed below: ...@@ -782,7 +782,7 @@ TDengine supports aggregations over data, they are listed below:
Function: return the sum of a statistics/STable. Function: return the sum of a statistics/STable.
Return Data Type: long integer INMT64 and Double. Return Data Type: INT64 and Double.
Applicable Fields: All types except timestamp, binary, nchar, bool. Applicable Fields: All types except timestamp, binary, nchar, bool.
...@@ -1196,7 +1196,7 @@ SELECT function_list FROM stb_name ...@@ -1196,7 +1196,7 @@ SELECT function_list FROM stb_name
- FILL statement specifies a filling mode when data is missing in a certain interval. Applicable filling modes include the following: - FILL statement specifies a filling mode when data is missing in a certain interval. Applicable filling modes include the following (a usage sketch follows this list):
1. Do not fill: NONE (default filingl mode). 1. Do not fill: NONE (default filling mode).
2. VALUE filling: Fixed value filling, where the filled value needs to be specified. For example: fill (VALUE, 1.23). 2. VALUE filling: Fixed value filling, where the filled value needs to be specified. For example: fill (VALUE, 1.23).
3. NULL filling: Fill the data with NULL. For example: fill (NULL). 3. NULL filling: Fill the data with NULL. For example: fill (NULL).
4. PREV filling: Filling data with the previous non-NULL value. For example: fill (PREV). 4. PREV filling: Filling data with the previous non-NULL value. For example: fill (PREV).
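A sketch of the FILL clause in use, assuming the demo.meters super table with a current column from the earlier example (both names are assumptions); empty 10-minute windows in the last day are filled with the fixed value 0.

```bash
taos -s "SELECT AVG(current) FROM demo.meters WHERE ts > now - 1d INTERVAL(10m) FILL(VALUE, 0);"
```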
......
...@@ -28,7 +28,7 @@ The overall system architecture of a typical DevOps application scenario is show ...@@ -28,7 +28,7 @@ The overall system architecture of a typical DevOps application scenario is show
In this application scenario, there are Agent tools deployed in the application environment to collect machine metrics, network metrics, and application metrics, data collectors to aggregate information collected by agents, systems for data persistence storage and management, and tools for monitoring data visualization (e.g., Grafana, etc.). In this application scenario, there are Agent tools deployed in the application environment to collect machine metrics, network metrics, and application metrics, data collectors to aggregate information collected by agents, systems for data persistence storage and management, and tools for monitoring data visualization (e.g., Grafana, etc.).
Among them, Agents deployed in application nodes are responsible for providing operational metrics from different sources to collectd/Statsd, and collectd/StatsD is responsible for pushing the aggregated data to the OpenTSDB cluster system and then visualizing the data using the visualization kanban board Grafana. Among them, Agents deployed in application nodes are responsible for providing operational metrics from different sources to collectd/Statsd, and collectd/StatsD is responsible for pushing the aggregated data to the OpenTSDB cluster system and then visualizing the data using the visualization board of Grafana.
### 2. Migration Service ### 2. Migration Service
...@@ -127,11 +127,11 @@ On the one hand, TDengine requires a strict schema definition for its incoming d ...@@ -127,11 +127,11 @@ On the one hand, TDengine requires a strict schema definition for its incoming d
Now let's assume a DevOps scenario where we use collectd to collect base metrics of devices, including memory, swap, disk, etc. The schema in OpenTSDB is as follows: Now let's assume a DevOps scenario where we use collectd to collect base metrics of devices, including memory, swap, disk, etc. The schema in OpenTSDB is as follows:
| No. | metric | value | type | tag1 | tag2 | tag3 | tag4 | tag5 | | No. | metric | value | type | tag1 | tag2 | tag3 | tag4 | tag5 |
| ---- | -------------- | ------ | ------ | ---- | ----------- | -------------------- | --------- | ------ | | --- | ------ | ----- | ------ | ---- | ----------- | -------------------- | --------- | ------ |
| 1 | memory | value | double | host | memory_type | memory_type_instance | source | | | 1 | memory | value | double | host | memory_type | memory_type_instance | source | |
| 2 | swap | value | double | host | swap_type | swap_type_instance | source | | | 2 | swap | value | double | host | swap_type | swap_type_instance | source | |
| 3 | disk | value | double | host | disk_point | disk_instance | disk_type | source | | 3 | disk | value | double | host | disk_point | disk_instance | disk_type | source |
......
[Unit] [Unit]
Description=TDengine server service Description=TDengine server service
After=network-online.target After=network-online.target taosadapter.service
Wants=network-online.target Wants=network-online.target taosadapter.service
[Service] [Service]
Type=simple Type=simple
......
...@@ -58,7 +58,6 @@ cp ${compile_dir}/../packaging/tools/startPre.sh ${pkg_dir}${install_home_pat ...@@ -58,7 +58,6 @@ cp ${compile_dir}/../packaging/tools/startPre.sh ${pkg_dir}${install_home_pat
cp ${compile_dir}/../packaging/tools/set_core.sh ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/../packaging/tools/set_core.sh ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/../packaging/tools/taosd-dump-cfg.gdb ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/../packaging/tools/taosd-dump-cfg.gdb ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosdemo ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin
if [ -f "${compile_dir}/build/bin/taosadapter" ]; then if [ -f "${compile_dir}/build/bin/taosadapter" ]; then
......
...@@ -18,5 +18,5 @@ ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib" \ ...@@ -18,5 +18,5 @@ ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib" \
LC_ALL=en_US.UTF-8 LC_ALL=en_US.UTF-8
EXPOSE 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042 EXPOSE 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042
CMD ["taosd"] CMD ["run_taosd.sh"]
VOLUME [ "/var/lib/taos", "/var/log/taos","/etc/taos/" ] VOLUME [ "/var/lib/taos", "/var/log/taos","/etc/taos/" ]
\ No newline at end of file
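A hedged example of running a container built from this Dockerfile; the image name is an assumption and should be replaced with whatever tag you actually build. The published port range matches the EXPOSE line above.

```bash
docker run -d --name tdengine \
  -p 6030-6042:6030-6042 \
  -p 6030-6042:6030-6042/udp \
  tdengine/tdengine
```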
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
# Generate the deb package for ubuntu, or rpm package for centos, or tar.gz package for other linux os # Generate the deb package for ubuntu, or rpm package for centos, or tar.gz package for other linux os
set -e set -e
#set -x set -x
# release.sh -v [cluster | edge] # release.sh -v [cluster | edge]
# -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] # -c [aarch32 | aarch64 | x64 | x86 | mips64 ...]
...@@ -414,10 +414,14 @@ fi ...@@ -414,10 +414,14 @@ fi
if [[ "$httpdBuild" == "true" ]]; then if [[ "$httpdBuild" == "true" ]]; then
BUILD_HTTP=true BUILD_HTTP=true
BUILD_TOOLS=false
else else
BUILD_HTTP=false BUILD_HTTP=false
fi
if [[ "$pagMode" == "full" ]]; then
BUILD_TOOLS=true BUILD_TOOLS=true
else
BUILD_TOOLS=false
fi fi
# check support cpu type # check support cpu type
...@@ -514,15 +518,17 @@ if [ "$osType" != "Darwin" ]; then ...@@ -514,15 +518,17 @@ if [ "$osType" != "Darwin" ]; then
cd ${script_dir}/deb cd ${script_dir}/deb
${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType} ${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
if [ -d ${top_dir}/src/kit/taos-tools/packaging/deb ]; then if [[ "$pagMode" == "full" ]]; then
cd ${top_dir}/src/kit/taos-tools/packaging/deb if [ -d ${top_dir}/src/kit/taos-tools/packaging/deb ]; then
[ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0" cd ${top_dir}/src/kit/taos-tools/packaging/deb
[ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0"
taos_tools_ver=$(git describe --tags|sed -e 's/ver-//g') taos_tools_ver=$(git describe --tags|sed -e 's/ver-//g'|awk -F '-' '{print $1}')
${csudo} ./make-taos-tools-deb.sh ${top_dir} \ ${csudo} ./make-taos-tools-deb.sh ${top_dir} \
${compile_dir} ${output_dir} ${taos_tools_ver} ${cpuType} ${osType} ${verMode} ${verType} ${compile_dir} ${output_dir} ${taos_tools_ver} ${cpuType} ${osType} ${verMode} ${verType}
fi
fi fi
else else
echo "==========dpkg command not exist, so not release deb package!!!" echo "==========dpkg command not exist, so not release deb package!!!"
fi fi
ret='0' ret='0'
...@@ -537,15 +543,17 @@ if [ "$osType" != "Darwin" ]; then ...@@ -537,15 +543,17 @@ if [ "$osType" != "Darwin" ]; then
cd ${script_dir}/rpm cd ${script_dir}/rpm
${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType} ${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
if [ -d ${top_dir}/src/kit/taos-tools/packaging/rpm ]; then if [[ "$pagMode" == "full" ]]; then
cd ${top_dir}/src/kit/taos-tools/packaging/rpm if [ -d ${top_dir}/src/kit/taos-tools/packaging/rpm ]; then
[ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0" cd ${top_dir}/src/kit/taos-tools/packaging/rpm
[ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0"
taos_tools_ver=$(git describe --tags|sed -e 's/ver-//g'|sed -e 's/-/_/g') taos_tools_ver=$(git describe --tags|sed -e 's/ver-//g'|awk -F '-' '{print $1}'|sed -e 's/-/_/g')
${csudo} ./make-taos-tools-rpm.sh ${top_dir} \ ${csudo} ./make-taos-tools-rpm.sh ${top_dir} \
${compile_dir} ${output_dir} ${taos_tools_ver} ${cpuType} ${osType} ${verMode} ${verType} ${compile_dir} ${output_dir} ${taos_tools_ver} ${cpuType} ${osType} ${verMode} ${verType}
fi
fi fi
else else
echo "==========rpmbuild command not exist, so not release rpm package!!!" echo "==========rpmbuild command not exist, so not release rpm package!!!"
fi fi
fi fi
......
...@@ -71,7 +71,6 @@ cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin ...@@ -71,7 +71,6 @@ cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin
if [ -f %{_compiledir}/build/bin/taosadapter ]; then if [ -f %{_compiledir}/build/bin/taosadapter ]; then
cp %{_compiledir}/build/bin/taosadapter %{buildroot}%{homepath}/bin ||: cp %{_compiledir}/build/bin/taosadapter %{buildroot}%{homepath}/bin ||:
fi fi
cp %{_compiledir}/build/bin/taosdemo %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../src/inc/taosdef.h %{buildroot}%{homepath}/include cp %{_compiledir}/../src/inc/taosdef.h %{buildroot}%{homepath}/include
...@@ -196,7 +195,6 @@ if [ $1 -eq 0 ];then ...@@ -196,7 +195,6 @@ if [ $1 -eq 0 ];then
${csudo} rm -f ${bin_link_dir}/taos || : ${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || : ${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosadapter || : ${csudo} rm -f ${bin_link_dir}/taosadapter || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${cfg_link_dir}/* || : ${csudo} rm -f ${cfg_link_dir}/* || :
${csudo} rm -f ${inc_link_dir}/taos.h || : ${csudo} rm -f ${inc_link_dir}/taos.h || :
${csudo} rm -f ${inc_link_dir}/taosdef.h || : ${csudo} rm -f ${inc_link_dir}/taosdef.h || :
......
...@@ -185,23 +185,25 @@ function install_bin() { ...@@ -185,23 +185,25 @@ function install_bin() {
# Remove links # Remove links
${csudo} rm -f ${bin_link_dir}/taos || : ${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || : ${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosadapter || : ${csudo} rm -f ${bin_link_dir}/taosadapter || :
${csudo} rm -f ${bin_link_dir}/taosdemo || : ${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${bin_link_dir}/rmtaos || : ${csudo} rm -f ${bin_link_dir}/rmtaos || :
${csudo} rm -f ${bin_link_dir}/tarbitrator || : ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
${csudo} rm -f ${bin_link_dir}/set_core || : ${csudo} rm -f ${bin_link_dir}/set_core || :
${csudo} rm -f ${bin_link_dir}/run_taosd.sh || :
${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
#Make link #Make link
[ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || : [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || :
[ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || : [ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || :
[ -x ${install_main_dir}/bin/taosadapter ] && ${csudo} ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter || : [ -x ${install_main_dir}/bin/taosadapter ] && ${csudo} ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter || :
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || : [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
[ -x ${install_main_dir}/bin/run_taosd.sh ] && ${csudo} ln -s ${install_main_dir}/bin/run_taosd.sh ${bin_link_dir}/run_taosd.sh || :
[ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
if [ "$verMode" == "cluster" ]; then if [ "$verMode" == "cluster" ]; then
...@@ -887,8 +889,8 @@ function update_TDengine() { ...@@ -887,8 +889,8 @@ function update_TDengine() {
fi fi
tar -zxf taos.tar.gz tar -zxf taos.tar.gz
install_jemalloc install_jemalloc
install_avro lib #install_avro lib
install_avro lib64 #install_avro lib64
echo -e "${GREEN}Start to update TDengine...${NC}" echo -e "${GREEN}Start to update TDengine...${NC}"
# Stop the service if running # Stop the service if running
...@@ -1001,8 +1003,8 @@ function install_TDengine() { ...@@ -1001,8 +1003,8 @@ function install_TDengine() {
install_header install_header
install_lib install_lib
install_jemalloc install_jemalloc
install_avro lib #install_avro lib
install_avro lib64 #install_avro lib64
if [ "$pagMode" != "lite" ]; then if [ "$pagMode" != "lite" ]; then
install_connector install_connector
......
...@@ -188,9 +188,6 @@ function update() { ...@@ -188,9 +188,6 @@ function update() {
install_log install_log
install_header install_header
install_lib install_lib
if [ "$pagMode" != "lite" ]; then
install_connector
fi
install_examples install_examples
install_bin install_bin
install_config install_config
...@@ -215,9 +212,6 @@ function install() { ...@@ -215,9 +212,6 @@ function install() {
install_log install_log
install_header install_header
install_lib install_lib
if [ "$pagMode" != "lite" ]; then
install_connector
fi
install_examples install_examples
install_bin install_bin
install_config install_config
......
...@@ -189,9 +189,6 @@ function update() { ...@@ -189,9 +189,6 @@ function update() {
install_log install_log
install_header install_header
install_lib install_lib
if [ "$pagMode" != "lite" ]; then
install_connector
fi
install_examples install_examples
install_bin install_bin
install_config install_config
...@@ -216,9 +213,6 @@ function install() { ...@@ -216,9 +213,6 @@ function install() {
install_log install_log
install_header install_header
install_lib install_lib
if [ "$pagMode" != "lite" ]; then
install_connector
fi
install_examples install_examples
install_bin install_bin
install_config install_config
......
...@@ -247,9 +247,6 @@ function update_PowerDB() { ...@@ -247,9 +247,6 @@ function update_PowerDB() {
install_log install_log
install_header install_header
install_lib install_lib
if [ "$pagMode" != "lite" ]; then
install_connector
fi
install_examples install_examples
install_bin install_bin
install_config install_config
...@@ -275,9 +272,6 @@ function install_PowerDB() { ...@@ -275,9 +272,6 @@ function install_PowerDB() {
install_header install_header
install_lib install_lib
install_jemalloc install_jemalloc
if [ "$pagMode" != "lite" ]; then
install_connector
fi
install_examples install_examples
install_bin install_bin
install_config install_config
......
...@@ -189,9 +189,6 @@ function update_prodb() { ...@@ -189,9 +189,6 @@ function update_prodb() {
install_log install_log
install_header install_header
install_lib install_lib
if [ "$pagMode" != "lite" ]; then
install_connector
fi
install_examples install_examples
install_bin install_bin
install_config install_config
...@@ -216,9 +213,6 @@ function install_prodb() { ...@@ -216,9 +213,6 @@ function install_prodb() {
install_log install_log
install_header install_header
install_lib install_lib
if [ "$pagMode" != "lite" ]; then
install_connector
fi
install_examples install_examples
install_bin install_bin
install_config install_config
......
...@@ -193,9 +193,6 @@ function update_tq() { ...@@ -193,9 +193,6 @@ function update_tq() {
install_log install_log
install_header install_header
install_lib install_lib
if [ "$pagMode" != "lite" ]; then
install_connector
fi
install_examples install_examples
install_bin install_bin
install_config install_config
...@@ -220,9 +217,6 @@ function install_tq() { ...@@ -220,9 +217,6 @@ function install_tq() {
install_log install_log
install_header install_header
install_lib install_lib
if [ "$pagMode" != "lite" ]; then
install_connector
fi
install_examples install_examples
install_bin install_bin
install_config install_config
......
...@@ -172,6 +172,7 @@ function install_bin() { ...@@ -172,6 +172,7 @@ function install_bin() {
${csudo} rm -f ${bin_link_dir}/rmjh || : ${csudo} rm -f ${bin_link_dir}/rmjh || :
${csudo} rm -f ${bin_link_dir}/tarbitrator || : ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
${csudo} rm -f ${bin_link_dir}/set_core || : ${csudo} rm -f ${bin_link_dir}/set_core || :
${csudo} rm -f ${bin_link_dir}/run_taosd.sh || :
${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
...@@ -181,6 +182,7 @@ function install_bin() { ...@@ -181,6 +182,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/jhdemo ] && ${csudo} ln -s ${install_main_dir}/bin/jhdemo ${bin_link_dir}/jhdemo || : [ -x ${install_main_dir}/bin/jhdemo ] && ${csudo} ln -s ${install_main_dir}/bin/jhdemo ${bin_link_dir}/jhdemo || :
[ -x ${install_main_dir}/bin/remove_jh.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_jh.sh ${bin_link_dir}/rmjh || : [ -x ${install_main_dir}/bin/remove_jh.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_jh.sh ${bin_link_dir}/rmjh || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
[ -x ${install_main_dir}/bin/run_taosd.sh ] && ${csudo} ln -s ${install_main_dir}/bin/run_taosd.sh ${bin_link_dir}/run_taosd.sh || :
[ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
if [ "$verMode" == "cluster" ]; then if [ "$verMode" == "cluster" ]; then
......
...@@ -172,6 +172,7 @@ function install_bin() { ...@@ -172,6 +172,7 @@ function install_bin() {
${csudo} rm -f ${bin_link_dir}/rmkh || : ${csudo} rm -f ${bin_link_dir}/rmkh || :
${csudo} rm -f ${bin_link_dir}/tarbitrator || : ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
${csudo} rm -f ${bin_link_dir}/set_core || : ${csudo} rm -f ${bin_link_dir}/set_core || :
${csudo} rm -f ${bin_link_dir}/run_taosd.sh || :
${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
...@@ -181,6 +182,7 @@ function install_bin() { ...@@ -181,6 +182,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/khdemo ] && ${csudo} ln -s ${install_main_dir}/bin/khdemo ${bin_link_dir}/khdemo || : [ -x ${install_main_dir}/bin/khdemo ] && ${csudo} ln -s ${install_main_dir}/bin/khdemo ${bin_link_dir}/khdemo || :
[ -x ${install_main_dir}/bin/remove_kh.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_kh.sh ${bin_link_dir}/rmkh || : [ -x ${install_main_dir}/bin/remove_kh.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_kh.sh ${bin_link_dir}/rmkh || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
[ -x ${install_main_dir}/bin/run_taosd.sh ] && ${csudo} ln -s ${install_main_dir}/bin/run_taosd.sh ${bin_link_dir}/run_taosd.sh || :
[ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
if [ "$verMode" == "cluster" ]; then if [ "$verMode" == "cluster" ]; then
......
...@@ -177,6 +177,7 @@ function install_bin() { ...@@ -177,6 +177,7 @@ function install_bin() {
${csudo} rm -f ${bin_link_dir}/rmpower || : ${csudo} rm -f ${bin_link_dir}/rmpower || :
${csudo} rm -f ${bin_link_dir}/tarbitrator || : ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
${csudo} rm -f ${bin_link_dir}/set_core || : ${csudo} rm -f ${bin_link_dir}/set_core || :
${csudo} rm -f ${bin_link_dir}/run_taosd.sh || :
${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
...@@ -186,6 +187,7 @@ function install_bin() { ...@@ -186,6 +187,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/powerdemo ] && ${csudo} ln -s ${install_main_dir}/bin/powerdemo ${bin_link_dir}/powerdemo || : [ -x ${install_main_dir}/bin/powerdemo ] && ${csudo} ln -s ${install_main_dir}/bin/powerdemo ${bin_link_dir}/powerdemo || :
[ -x ${install_main_dir}/bin/remove_power.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_power.sh ${bin_link_dir}/rmpower || : [ -x ${install_main_dir}/bin/remove_power.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_power.sh ${bin_link_dir}/rmpower || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
[ -x ${install_main_dir}/bin/run_taosd.sh ] && ${csudo} ln -s ${install_main_dir}/bin/run_taosd.sh ${bin_link_dir}/run_taosd.sh || :
[ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
if [ "$verMode" == "cluster" ]; then if [ "$verMode" == "cluster" ]; then
......
...@@ -172,6 +172,7 @@ function install_bin() { ...@@ -172,6 +172,7 @@ function install_bin() {
${csudo} rm -f ${bin_link_dir}/rmprodb || : ${csudo} rm -f ${bin_link_dir}/rmprodb || :
${csudo} rm -f ${bin_link_dir}/tarbitrator || : ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
${csudo} rm -f ${bin_link_dir}/set_core || : ${csudo} rm -f ${bin_link_dir}/set_core || :
${csudo} rm -f ${bin_link_dir}/run_taosd.sh || :
${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
...@@ -181,6 +182,7 @@ function install_bin() { ...@@ -181,6 +182,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/prodemo ] && ${csudo} ln -s ${install_main_dir}/bin/prodemo ${bin_link_dir}/prodemo || : [ -x ${install_main_dir}/bin/prodemo ] && ${csudo} ln -s ${install_main_dir}/bin/prodemo ${bin_link_dir}/prodemo || :
[ -x ${install_main_dir}/bin/remove_pro.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_pro.sh ${bin_link_dir}/rmprodb || : [ -x ${install_main_dir}/bin/remove_pro.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_pro.sh ${bin_link_dir}/rmprodb || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
[ -x ${install_main_dir}/bin/run_taosd.sh ] && ${csudo} ln -s ${install_main_dir}/bin/run_taosd.sh ${bin_link_dir}/run_taosd.sh || :
[ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
if [ "$verMode" == "cluster" ]; then if [ "$verMode" == "cluster" ]; then
......
...@@ -177,6 +177,7 @@ function install_bin() { ...@@ -177,6 +177,7 @@ function install_bin() {
${csudo} rm -f ${bin_link_dir}/rmtq || : ${csudo} rm -f ${bin_link_dir}/rmtq || :
${csudo} rm -f ${bin_link_dir}/tarbitrator || : ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
${csudo} rm -f ${bin_link_dir}/set_core || : ${csudo} rm -f ${bin_link_dir}/set_core || :
${csudo} rm -f ${bin_link_dir}/run_taosd.sh || :
${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
...@@ -186,6 +187,7 @@ function install_bin() { ...@@ -186,6 +187,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/tqdemo ] && ${csudo} ln -s ${install_main_dir}/bin/tqdemo ${bin_link_dir}/tqdemo || : [ -x ${install_main_dir}/bin/tqdemo ] && ${csudo} ln -s ${install_main_dir}/bin/tqdemo ${bin_link_dir}/tqdemo || :
[ -x ${install_main_dir}/bin/remove_tq.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_tq.sh ${bin_link_dir}/rmtq || : [ -x ${install_main_dir}/bin/remove_tq.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_tq.sh ${bin_link_dir}/rmtq || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
[ -x ${install_main_dir}/bin/run_taosd.sh ] && ${csudo} ln -s ${install_main_dir}/bin/run_taosd.sh ${bin_link_dir}/run_taosd.sh || :
[ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
if [ "$verMode" == "cluster" ]; then if [ "$verMode" == "cluster" ]; then
......
...@@ -165,6 +165,7 @@ function install_bin() { ...@@ -165,6 +165,7 @@ function install_bin() {
if [ "$osType" != "Darwin" ]; then if [ "$osType" != "Darwin" ]; then
${csudo} rm -f ${bin_link_dir}/perfMonitor || : ${csudo} rm -f ${bin_link_dir}/perfMonitor || :
${csudo} rm -f ${bin_link_dir}/set_core || : ${csudo} rm -f ${bin_link_dir}/set_core || :
${csudo} rm -f ${bin_link_dir}/run_taosd.sh || :
${csudo} rm -f ${bin_link_dir}/rmtaos || : ${csudo} rm -f ${bin_link_dir}/rmtaos || :
${csudo} cp -r ${binary_dir}/build/bin/* ${install_main_dir}/bin ${csudo} cp -r ${binary_dir}/build/bin/* ${install_main_dir}/bin
...@@ -172,6 +173,7 @@ function install_bin() { ...@@ -172,6 +173,7 @@ function install_bin() {
${csudo} cp -r ${script_dir}/remove.sh ${install_main_dir}/bin ${csudo} cp -r ${script_dir}/remove.sh ${install_main_dir}/bin
${csudo} cp -r ${script_dir}/set_core.sh ${install_main_dir}/bin ${csudo} cp -r ${script_dir}/set_core.sh ${install_main_dir}/bin
${csudo} cp -r ${script_dir}/run_taosd.sh ${install_main_dir}/bin
${csudo} cp -r ${script_dir}/startPre.sh ${install_main_dir}/bin ${csudo} cp -r ${script_dir}/startPre.sh ${install_main_dir}/bin
${csudo} chmod 0555 ${install_main_dir}/bin/* ${csudo} chmod 0555 ${install_main_dir}/bin/*
...@@ -183,6 +185,7 @@ function install_bin() { ...@@ -183,6 +185,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
[ -x ${install_main_dir}/bin/perfMonitor ] && ${csudo} ln -s ${install_main_dir}/bin/perfMonitor ${bin_link_dir}/perfMonitor || : [ -x ${install_main_dir}/bin/perfMonitor ] && ${csudo} ln -s ${install_main_dir}/bin/perfMonitor ${bin_link_dir}/perfMonitor || :
[ -x ${install_main_dir}/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : [ -x ${install_main_dir}/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
[ -x ${install_main_dir}/run_taosd.sh ] && ${csudo} ln -s ${install_main_dir}/bin/run_taosd.sh ${bin_link_dir}/run_taosd.sh || :
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || : [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || :
else else
......
...@@ -45,8 +45,10 @@ if [ "$osType" != "Darwin" ]; then ...@@ -45,8 +45,10 @@ if [ "$osType" != "Darwin" ]; then
strip ${build_dir}/bin/taos strip ${build_dir}/bin/taos
bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh" bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh"
else else
bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdemo \ bin_files="${script_dir}/remove_client.sh \
${script_dir}/remove_client.sh ${script_dir}/set_core.sh ${script_dir}/get_client.sh ${script_dir}/taosd-dump-cfg.gdb" ${script_dir}/set_core.sh \
${script_dir}/get_client.sh"
#${script_dir}/get_client.sh ${script_dir}/taosd-dump-cfg.gdb"
fi fi
lib_files="${build_dir}/lib/libtaos.so.${version}" lib_files="${build_dir}/lib/libtaos.so.${version}"
else else
......
...@@ -69,15 +69,15 @@ if [ "$osType" != "Darwin" ]; then ...@@ -69,15 +69,15 @@ if [ "$osType" != "Darwin" ]; then
if [ "$pagMode" == "lite" ]; then if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/taos strip ${build_dir}/bin/taos
cp ${build_dir}/bin/taos ${install_dir}/bin/jh_taos cp ${build_dir}/bin/taos ${install_dir}/bin/jh_taos
cp ${script_dir}/remove_jh.sh ${install_dir}/bin cp ${script_dir}/remove_client_jh.sh ${install_dir}/bin
else else
cp ${build_dir}/bin/taos ${install_dir}/bin/jh_taos cp ${build_dir}/bin/taos ${install_dir}/bin/jh_taos
cp ${script_dir}/remove_jh.sh ${install_dir}/bin cp ${script_dir}/remove_client_jh.sh ${install_dir}/bin
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/jhdemo cp ${build_dir}/bin/taosdemo ${install_dir}/bin/jhdemo
cp ${build_dir}/bin/taosdump ${install_dir}/bin/jh_taosdump cp ${build_dir}/bin/taosdump ${install_dir}/bin/jh_taosdump
cp ${script_dir}/set_core.sh ${install_dir}/bin cp ${script_dir}/set_core.sh ${install_dir}/bin
cp ${script_dir}/get_client.sh ${install_dir}/bin cp ${script_dir}/get_client.sh ${install_dir}/bin
cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin #cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin
fi fi
else else
cp ${bin_files} ${install_dir}/bin cp ${bin_files} ${install_dir}/bin
......
...@@ -69,15 +69,15 @@ if [ "$osType" != "Darwin" ]; then ...@@ -69,15 +69,15 @@ if [ "$osType" != "Darwin" ]; then
if [ "$pagMode" == "lite" ]; then if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/taos strip ${build_dir}/bin/taos
cp ${build_dir}/bin/taos ${install_dir}/bin/khclient cp ${build_dir}/bin/taos ${install_dir}/bin/khclient
cp ${script_dir}/remove_kh.sh ${install_dir}/bin cp ${script_dir}/remove_client_kh.sh ${install_dir}/bin
else else
cp ${build_dir}/bin/taos ${install_dir}/bin/khclient cp ${build_dir}/bin/taos ${install_dir}/bin/khclient
cp ${script_dir}/remove_kh.sh ${install_dir}/bin cp ${script_dir}/remove_client_kh.sh ${install_dir}/bin
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/khdemo cp ${build_dir}/bin/taosdemo ${install_dir}/bin/khdemo
cp ${build_dir}/bin/taosdump ${install_dir}/bin/khdump cp ${build_dir}/bin/taosdump ${install_dir}/bin/khdump
cp ${script_dir}/set_core.sh ${install_dir}/bin cp ${script_dir}/set_core.sh ${install_dir}/bin
cp ${script_dir}/get_client.sh ${install_dir}/bin cp ${script_dir}/get_client.sh ${install_dir}/bin
cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin #cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin
fi fi
else else
cp ${bin_files} ${install_dir}/bin cp ${bin_files} ${install_dir}/bin
......
...@@ -109,15 +109,15 @@ if [ "$osType" != "Darwin" ]; then ...@@ -109,15 +109,15 @@ if [ "$osType" != "Darwin" ]; then
if [ "$pagMode" == "lite" ]; then if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/taos strip ${build_dir}/bin/taos
cp ${build_dir}/bin/taos ${install_dir}/bin/power cp ${build_dir}/bin/taos ${install_dir}/bin/power
cp ${script_dir}/remove_power.sh ${install_dir}/bin cp ${script_dir}/remove_client_power.sh ${install_dir}/bin
else else
cp ${build_dir}/bin/taos ${install_dir}/bin/power cp ${build_dir}/bin/taos ${install_dir}/bin/power
cp ${script_dir}/remove_power.sh ${install_dir}/bin cp ${script_dir}/remove_client_power.sh ${install_dir}/bin
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo
cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump
cp ${script_dir}/set_core.sh ${install_dir}/bin cp ${script_dir}/set_core.sh ${install_dir}/bin
cp ${script_dir}/get_client.sh ${install_dir}/bin cp ${script_dir}/get_client.sh ${install_dir}/bin
cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin #cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin
fi fi
else else
cp ${bin_files} ${install_dir}/bin cp ${bin_files} ${install_dir}/bin
......
...@@ -69,15 +69,15 @@ if [ "$osType" != "Darwin" ]; then ...@@ -69,15 +69,15 @@ if [ "$osType" != "Darwin" ]; then
if [ "$pagMode" == "lite" ]; then if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/taos strip ${build_dir}/bin/taos
cp ${build_dir}/bin/taos ${install_dir}/bin/prodbc cp ${build_dir}/bin/taos ${install_dir}/bin/prodbc
cp ${script_dir}/remove_pro.sh ${install_dir}/bin cp ${script_dir}/remove_client_pro.sh ${install_dir}/bin
else else
cp ${build_dir}/bin/taos ${install_dir}/bin/prodbc cp ${build_dir}/bin/taos ${install_dir}/bin/prodbc
cp ${script_dir}/remove_pro.sh ${install_dir}/bin cp ${script_dir}/remove_client_pro.sh ${install_dir}/bin
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/prodemo cp ${build_dir}/bin/taosdemo ${install_dir}/bin/prodemo
cp ${build_dir}/bin/taosdump ${install_dir}/bin/prodump cp ${build_dir}/bin/taosdump ${install_dir}/bin/prodump
cp ${script_dir}/set_core.sh ${install_dir}/bin cp ${script_dir}/set_core.sh ${install_dir}/bin
cp ${script_dir}/get_client.sh ${install_dir}/bin cp ${script_dir}/get_client.sh ${install_dir}/bin
cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin #cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin
fi fi
else else
cp ${bin_files} ${install_dir}/bin cp ${bin_files} ${install_dir}/bin
......
...@@ -69,15 +69,15 @@ if [ "$osType" != "Darwin" ]; then ...@@ -69,15 +69,15 @@ if [ "$osType" != "Darwin" ]; then
if [ "$pagMode" == "lite" ]; then if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/taos strip ${build_dir}/bin/taos
cp ${build_dir}/bin/taos ${install_dir}/bin/tq cp ${build_dir}/bin/taos ${install_dir}/bin/tq
cp ${script_dir}/remove_tq.sh ${install_dir}/bin cp ${script_dir}/remove_client_tq.sh ${install_dir}/bin
else else
cp ${build_dir}/bin/taos ${install_dir}/bin/tq cp ${build_dir}/bin/taos ${install_dir}/bin/tq
cp ${script_dir}/remove_tq.sh ${install_dir}/bin cp ${script_dir}/remove_client_tq.sh ${install_dir}/bin
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/tqdemo cp ${build_dir}/bin/taosdemo ${install_dir}/bin/tqdemo
cp ${build_dir}/bin/taosdump ${install_dir}/bin/tqdump cp ${build_dir}/bin/taosdump ${install_dir}/bin/tqdump
cp ${script_dir}/set_core.sh ${install_dir}/bin cp ${script_dir}/set_core.sh ${install_dir}/bin
cp ${script_dir}/get_client.sh ${install_dir}/bin cp ${script_dir}/get_client.sh ${install_dir}/bin
cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin #cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin
fi fi
else else
cp ${bin_files} ${install_dir}/bin cp ${bin_files} ${install_dir}/bin
......
...@@ -31,23 +31,38 @@ else ...@@ -31,23 +31,38 @@ else
install_dir="${release_dir}/TDengine-server-${version}" install_dir="${release_dir}/TDengine-server-${version}"
fi fi
if [ -d ${top_dir}/src/kit/taos-tools/packaging/deb ]; then
cd ${top_dir}/src/kit/taos-tools/packaging/deb
[ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0"
taostools_ver=$(git describe --tags|sed -e 's/ver-//g'|awk -F '-' '{print $1}')
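    # for illustration only: a tag such as "ver-2.1.2-10-gabcdef12" would yield taostools_ver="2.1.2"; actual tag names may differ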
taostools_install_dir="${release_dir}/taos-tools-${taostools_ver}"
cd ${curr_dir}
else
taostools_install_dir="${release_dir}/taos-tools-${version}"
fi
# Directories and files # Directories and files
if [ "$pagMode" == "lite" ]; then if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/taosd strip ${build_dir}/bin/taosd
strip ${build_dir}/bin/taos strip ${build_dir}/bin/taos
    # the lite version does not include taosadapter, so the RESTful interface is unavailable # the lite version does not include taosadapter, so the RESTful interface is unavailable
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh ${script_dir}/startPre.sh" bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh ${script_dir}/startPre.sh"
taostools_bin_files=""
else else
bin_files="${build_dir}/bin/taosd \ bin_files="${build_dir}/bin/taosd \
${build_dir}/bin/taos \ ${build_dir}/bin/taos \
${build_dir}/bin/taosadapter \ ${build_dir}/bin/taosadapter \
${build_dir}/bin/taosdump \
${build_dir}/bin/taosdemo \
${build_dir}/bin/tarbitrator\ ${build_dir}/bin/tarbitrator\
${script_dir}/remove.sh \ ${script_dir}/remove.sh \
${script_dir}/set_core.sh \ ${script_dir}/set_core.sh \
${script_dir}/run_taosd.sh \
${script_dir}/startPre.sh \ ${script_dir}/startPre.sh \
${script_dir}/taosd-dump-cfg.gdb" ${script_dir}/taosd-dump-cfg.gdb"
taostools_bin_files=" ${build_dir}/bin/taosdump \
${build_dir}/bin/taosBenchmark"
fi fi
lib_files="${build_dir}/lib/libtaos.so.${version}" lib_files="${build_dir}/lib/libtaos.so.${version}"
...@@ -78,6 +93,7 @@ mkdir -p ${install_dir} ...@@ -78,6 +93,7 @@ mkdir -p ${install_dir}
mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg
if [ -f "${compile_dir}/test/cfg/taosadapter.toml" ]; then if [ -f "${compile_dir}/test/cfg/taosadapter.toml" ]; then
cp ${compile_dir}/test/cfg/taosadapter.toml ${install_dir}/cfg || : cp ${compile_dir}/test/cfg/taosadapter.toml ${install_dir}/cfg || :
fi fi
...@@ -102,10 +118,29 @@ mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/taos ...@@ -102,10 +118,29 @@ mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/taos
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || : mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || : mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
if [ -f ${build_dir}/lib/libavro.so.23.0.0 ]; then if [ -n "${taostools_bin_files}" ]; then
mkdir -p ${install_dir}/avro/{lib,lib/pkgconfig} mkdir -p ${taostools_install_dir} || echo -e "failed to create ${taostools_install_dir}"
cp ${build_dir}/lib/libavro.* ${install_dir}/avro/lib mkdir -p ${taostools_install_dir}/bin \
cp ${build_dir}/lib/pkgconfig/avro-c.pc ${install_dir}/avro/lib/pkgconfig && cp ${taostools_bin_files} ${taostools_install_dir}/bin \
&& chmod a+x ${taostools_install_dir}/bin/* || :
[ -f ${taostools_install_dir}/bin/taosBenchmark ] && \
ln -sf ${taostools_install_dir}/bin/taosBenchmark \
${taostools_install_dir}/bin/taosdemo
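    # keep the legacy "taosdemo" name available as a symlink to taosBenchmark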
if [ -f ${top_dir}/src/kit/taos-tools/packaging/tools/install-taostools.sh ]; then
cp ${top_dir}/src/kit/taos-tools/packaging/tools/install-taostools.sh \
${taostools_install_dir}/ > /dev/null \
&& chmod a+x ${taostools_install_dir}/install-taostools.sh \
|| echo -e "failed to copy install-taostools.sh"
else
echo -e "install-taostools.sh not found"
fi
if [ -f ${build_dir}/lib/libavro.so.23.0.0 ]; then
mkdir -p ${taostools_install_dir}/avro/{lib,lib/pkgconfig} || echo -e "failed to create ${taostools_install_dir}/avro"
cp ${build_dir}/lib/libavro.* ${taostools_install_dir}/avro/lib
cp ${build_dir}/lib/pkgconfig/avro-c.pc ${taostools_install_dir}/avro/lib/pkgconfig
fi
fi fi
if [ -f ${build_dir}/bin/jemalloc-config ]; then if [ -f ${build_dir}/bin/jemalloc-config ]; then
...@@ -235,6 +270,8 @@ cd ${release_dir} ...@@ -235,6 +270,8 @@ cd ${release_dir}
# install_dir already distinguishes cluster from edge, so this code is commented out # install_dir already distinguishes cluster from edge, so this code is commented out
pkg_name=${install_dir}-${osType}-${cpuType} pkg_name=${install_dir}-${osType}-${cpuType}
taostools_pkg_name=${taostools_install_dir}-${osType}-${cpuType}
# if [ "$verMode" == "cluster" ]; then # if [ "$verMode" == "cluster" ]; then
# pkg_name=${install_dir}-${osType}-${cpuType} # pkg_name=${install_dir}-${osType}-${cpuType}
# elif [ "$verMode" == "edge" ]; then # elif [ "$verMode" == "edge" ]; then
...@@ -246,8 +283,10 @@ pkg_name=${install_dir}-${osType}-${cpuType} ...@@ -246,8 +283,10 @@ pkg_name=${install_dir}-${osType}-${cpuType}
if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then if [[ "$verType" == "beta" ]] || [[ "$verType" == "preRelease" ]]; then
pkg_name=${install_dir}-${verType}-${osType}-${cpuType} pkg_name=${install_dir}-${verType}-${osType}-${cpuType}
taostools_pkg_name=${taostools_install_dir}-${verType}-${osType}-${cpuType}
elif [ "$verType" == "stable" ]; then elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name} pkg_name=${pkg_name}
taostools_pkg_name=${taostools_pkg_name}
else else
echo "unknow verType, nor stabel or beta" echo "unknow verType, nor stabel or beta"
exit 1 exit 1
...@@ -257,11 +296,20 @@ if [ "$pagMode" == "lite" ]; then ...@@ -257,11 +296,20 @@ if [ "$pagMode" == "lite" ]; then
pkg_name=${pkg_name}-Lite pkg_name=${pkg_name}-Lite
fi fi
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : tar -zcv -f "$(basename ${pkg_name}).tar.gz" "$(basename ${install_dir})" --remove-files || :
exitcode=$? exitcode=$?
if [ "$exitcode" != "0" ]; then if [ "$exitcode" != "0" ]; then
echo "tar ${pkg_name}.tar.gz error !!!" echo "tar ${pkg_name}.tar.gz error !!!"
exit $exitcode exit $exitcode
fi fi
if [ -n "${taostools_bin_files}" ]; then
tar -zcv -f "$(basename ${taostools_pkg_name}).tar.gz" "$(basename ${taostools_install_dir})" --remove-files || :
exitcode=$?
if [ "$exitcode" != "0" ]; then
echo "tar ${taostools_pkg_name}.tar.gz error !!!"
exit $exitcode
fi
fi
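# for illustration: with the naming above this produces tarballs such as TDengine-server-<version>-<osType>-<cpuType>.tar.gz and taos-tools-<taostools_ver>-<osType>-<cpuType>.tar.gz (beta/Lite suffixes may also be appended)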
cd ${curr_dir} cd ${curr_dir}
...@@ -56,6 +56,7 @@ else ...@@ -56,6 +56,7 @@ else
cp ${build_dir}/bin/taosdump ${install_dir}/bin/jh_taosdump cp ${build_dir}/bin/taosdump ${install_dir}/bin/jh_taosdump
cp ${build_dir}/bin/tarbitrator ${install_dir}/bin cp ${build_dir}/bin/tarbitrator ${install_dir}/bin
cp ${script_dir}/set_core.sh ${install_dir}/bin cp ${script_dir}/set_core.sh ${install_dir}/bin
cp ${script_dir}/run_taosd.sh ${install_dir}/bin
cp ${script_dir}/get_client.sh ${install_dir}/bin cp ${script_dir}/get_client.sh ${install_dir}/bin
cp ${script_dir}/startPre.sh ${install_dir}/bin cp ${script_dir}/startPre.sh ${install_dir}/bin
cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin
......
...@@ -56,6 +56,7 @@ else ...@@ -56,6 +56,7 @@ else
cp ${build_dir}/bin/taosdump ${install_dir}/bin/khdump cp ${build_dir}/bin/taosdump ${install_dir}/bin/khdump
cp ${build_dir}/bin/tarbitrator ${install_dir}/bin cp ${build_dir}/bin/tarbitrator ${install_dir}/bin
cp ${script_dir}/set_core.sh ${install_dir}/bin cp ${script_dir}/set_core.sh ${install_dir}/bin
cp ${script_dir}/run_taosd.sh ${install_dir}/bin
cp ${script_dir}/get_client.sh ${install_dir}/bin cp ${script_dir}/get_client.sh ${install_dir}/bin
cp ${script_dir}/startPre.sh ${install_dir}/bin cp ${script_dir}/startPre.sh ${install_dir}/bin
cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin
......
...@@ -64,12 +64,13 @@ else ...@@ -64,12 +64,13 @@ else
# bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${build_dir}/bin/powerdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove_power.sh ${script_dir}/set_core.sh" # bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${build_dir}/bin/powerdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove_power.sh ${script_dir}/set_core.sh"
cp ${build_dir}/bin/taos ${install_dir}/bin/power cp ${build_dir}/bin/taos ${install_dir}/bin/power
cp ${build_dir}/bin/taosd ${install_dir}/bin/powerd cp ${build_dir}/bin/taosd ${install_dir}/bin/powerd
cp ${build_dir}/bin/taosadapter ${install_dir}/bin/taosadapter ||: cp ${build_dir}/bin/taosadapter ${install_dir}/bin/taosadapter ||:
cp ${script_dir}/remove_power.sh ${install_dir}/bin cp ${script_dir}/remove_power.sh ${install_dir}/bin
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo
cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump
cp ${build_dir}/bin/tarbitrator ${install_dir}/bin cp ${build_dir}/bin/tarbitrator ${install_dir}/bin
cp ${script_dir}/set_core.sh ${install_dir}/bin cp ${script_dir}/set_core.sh ${install_dir}/bin
cp ${script_dir}/run_taosd.sh ${install_dir}/bin
cp ${script_dir}/get_client.sh ${install_dir}/bin cp ${script_dir}/get_client.sh ${install_dir}/bin
cp ${script_dir}/startPre.sh ${install_dir}/bin cp ${script_dir}/startPre.sh ${install_dir}/bin
cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin
......
...@@ -56,6 +56,7 @@ else ...@@ -56,6 +56,7 @@ else
cp ${build_dir}/bin/taosdump ${install_dir}/bin/prodump cp ${build_dir}/bin/taosdump ${install_dir}/bin/prodump
cp ${build_dir}/bin/tarbitrator ${install_dir}/bin cp ${build_dir}/bin/tarbitrator ${install_dir}/bin
cp ${script_dir}/set_core.sh ${install_dir}/bin cp ${script_dir}/set_core.sh ${install_dir}/bin
cp ${script_dir}/run_taosd.sh ${install_dir}/bin
cp ${script_dir}/get_client.sh ${install_dir}/bin cp ${script_dir}/get_client.sh ${install_dir}/bin
cp ${script_dir}/startPre.sh ${install_dir}/bin cp ${script_dir}/startPre.sh ${install_dir}/bin
cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin
......
...@@ -61,6 +61,7 @@ else ...@@ -61,6 +61,7 @@ else
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/tqdemo cp ${build_dir}/bin/taosdemo ${install_dir}/bin/tqdemo
cp ${build_dir}/bin/tarbitrator ${install_dir}/bin cp ${build_dir}/bin/tarbitrator ${install_dir}/bin
cp ${script_dir}/set_core.sh ${install_dir}/bin cp ${script_dir}/set_core.sh ${install_dir}/bin
cp ${script_dir}/run_taosd.sh ${install_dir}/bin
cp ${script_dir}/get_client.sh ${install_dir}/bin cp ${script_dir}/get_client.sh ${install_dir}/bin
cp ${script_dir}/startPre.sh ${install_dir}/bin cp ${script_dir}/startPre.sh ${install_dir}/bin
cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin cp ${script_dir}/taosd-dump-cfg.gdb ${install_dir}/bin
......
...@@ -27,6 +27,7 @@ install_nginxd_dir="/usr/local/nginxd" ...@@ -27,6 +27,7 @@ install_nginxd_dir="/usr/local/nginxd"
service_config_dir="/etc/systemd/system" service_config_dir="/etc/systemd/system"
taos_service_name="taosd" taos_service_name="taosd"
taosadapter_service_name="taosadapter"
tarbitrator_service_name="tarbitratord" tarbitrator_service_name="tarbitratord"
nginx_service_name="nginxd" nginx_service_name="nginxd"
csudo="" csudo=""
...@@ -78,12 +79,13 @@ function clean_bin() { ...@@ -78,12 +79,13 @@ function clean_bin() {
# Remove link # Remove link
${csudo} rm -f ${bin_link_dir}/taos || : ${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || : ${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosadapter || : ${csudo} rm -f ${bin_link_dir}/taosadapter || :
${csudo} rm -f ${bin_link_dir}/taosdemo || : ${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${bin_link_dir}/rmtaos || : ${csudo} rm -f ${bin_link_dir}/rmtaos || :
${csudo} rm -f ${bin_link_dir}/tarbitrator || : ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
${csudo} rm -f ${bin_link_dir}/set_core || : ${csudo} rm -f ${bin_link_dir}/set_core || :
${csudo} rm -f ${bin_link_dir}/run_taosd.sh || :
} }
function clean_lib() { function clean_lib() {
...@@ -112,14 +114,20 @@ function clean_log() { ...@@ -112,14 +114,20 @@ function clean_log() {
function clean_service_on_systemd() { function clean_service_on_systemd() {
taosd_service_config="${service_config_dir}/${taos_service_name}.service" taosd_service_config="${service_config_dir}/${taos_service_name}.service"
taosadapter_service_config="${service_config_dir}/taosadapter.service"
if systemctl is-active --quiet ${taos_service_name}; then if systemctl is-active --quiet ${taos_service_name}; then
echo "TDengine taosd is running, stopping it..." echo "TDengine taosd is running, stopping it..."
${csudo} systemctl stop ${taos_service_name} &> /dev/null || echo &> /dev/null ${csudo} systemctl stop ${taos_service_name} &> /dev/null || echo &> /dev/null
fi fi
${csudo} systemctl disable ${taos_service_name} &> /dev/null || echo &> /dev/null ${csudo} systemctl disable ${taos_service_name} &> /dev/null || echo &> /dev/null
${csudo} rm -f ${taosd_service_config} ${csudo} rm -f ${taosd_service_config}
[ -f ${taosadapter_service_config} ] && ${sudo} rm -f ${taosadapter_service_config}
taosadapter_service_config="${service_config_dir}/taosadapter.service"
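  # stop and disable the taosadapter service as well, then remove its unit file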
if systemctl is-active --quiet ${taosadapter_service_name}; then
echo "TDengine taosAdapter is running, stopping it..."
${csudo} systemctl stop ${taosadapter_service_name} &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable ${taosadapter_service_name} &> /dev/null || echo &> /dev/null
[ -f ${taosadapter_service_config} ] && ${csudo} rm -f ${taosadapter_service_config}
tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service" tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"
if systemctl is-active --quiet ${tarbitrator_service_name}; then if systemctl is-active --quiet ${tarbitrator_service_name}; then
......
...@@ -70,10 +70,11 @@ function clean_bin() { ...@@ -70,10 +70,11 @@ function clean_bin() {
${csudo} rm -f ${bin_link_dir}/jh_taos || : ${csudo} rm -f ${bin_link_dir}/jh_taos || :
${csudo} rm -f ${bin_link_dir}/jh_taosd || : ${csudo} rm -f ${bin_link_dir}/jh_taosd || :
${csudo} rm -f ${bin_link_dir}/jhdemo || : ${csudo} rm -f ${bin_link_dir}/jhdemo || :
${csudo} rm -f ${bin_link_dir}/jh_taosdump || : ${csudo} rm -f ${bin_link_dir}/jh_taosdump || :
${csudo} rm -f ${bin_link_dir}/rmjh || : ${csudo} rm -f ${bin_link_dir}/rmjh || :
${csudo} rm -f ${bin_link_dir}/tarbitrator || : ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
${csudo} rm -f ${bin_link_dir}/set_core || : ${csudo} rm -f ${bin_link_dir}/set_core || :
${csudo} rm -f ${bin_link_dir}/run_taosd.sh || :
} }
function clean_lib() { function clean_lib() {
......
...@@ -74,6 +74,7 @@ function clean_bin() { ...@@ -74,6 +74,7 @@ function clean_bin() {
${csudo} rm -f ${bin_link_dir}/rmkh || : ${csudo} rm -f ${bin_link_dir}/rmkh || :
${csudo} rm -f ${bin_link_dir}/tarbitrator || : ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
${csudo} rm -f ${bin_link_dir}/set_core || : ${csudo} rm -f ${bin_link_dir}/set_core || :
${csudo} rm -f ${bin_link_dir}/run_taosd.sh || :
} }
function clean_lib() { function clean_lib() {
......
...@@ -76,6 +76,7 @@ function clean_bin() { ...@@ -76,6 +76,7 @@ function clean_bin() {
${csudo} rm -f ${bin_link_dir}/rmpower || : ${csudo} rm -f ${bin_link_dir}/rmpower || :
${csudo} rm -f ${bin_link_dir}/tarbitrator || : ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
${csudo} rm -f ${bin_link_dir}/set_core || : ${csudo} rm -f ${bin_link_dir}/set_core || :
${csudo} rm -f ${bin_link_dir}/run_taosd.sh || :
} }
function clean_lib() { function clean_lib() {
......
...@@ -74,6 +74,7 @@ function clean_bin() { ...@@ -74,6 +74,7 @@ function clean_bin() {
${csudo} rm -f ${bin_link_dir}/rmprodb || : ${csudo} rm -f ${bin_link_dir}/rmprodb || :
${csudo} rm -f ${bin_link_dir}/tarbitrator || : ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
${csudo} rm -f ${bin_link_dir}/set_core || : ${csudo} rm -f ${bin_link_dir}/set_core || :
${csudo} rm -f ${bin_link_dir}/run_taosd.sh || :
} }
function clean_lib() { function clean_lib() {
......
...@@ -76,6 +76,7 @@ function clean_bin() { ...@@ -76,6 +76,7 @@ function clean_bin() {
${csudo} rm -f ${bin_link_dir}/rmtq || : ${csudo} rm -f ${bin_link_dir}/rmtq || :
${csudo} rm -f ${bin_link_dir}/tarbitrator || : ${csudo} rm -f ${bin_link_dir}/tarbitrator || :
${csudo} rm -f ${bin_link_dir}/set_core || : ${csudo} rm -f ${bin_link_dir}/set_core || :
${csudo} rm -f ${bin_link_dir}/run_taosd.sh || :
} }
function clean_lib() { function clean_lib() {
......
#!/bin/bash
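# start taosadapter in the background when it is installed, then run taosd in the foreground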
[[ -x /usr/bin/taosadapter ]] && /usr/bin/taosadapter &
taosd
...@@ -42,7 +42,7 @@ int32_t tscHandleInsertRetry(SSqlObj* parent, SSqlObj* child); ...@@ -42,7 +42,7 @@ int32_t tscHandleInsertRetry(SSqlObj* parent, SSqlObj* child);
void tscBuildResFromSubqueries(SSqlObj *pSql); void tscBuildResFromSubqueries(SSqlObj *pSql);
TAOS_ROW doSetResultRowData(SSqlObj *pSql); TAOS_ROW doSetResultRowData(SSqlObj *pSql);
char *getArithmeticInputSrc(void *param, const char *name, int32_t colId); char *getScalarExprInputSrc(void *param, const char *name, int32_t colId);
void tscLockByThread(int64_t *lockedBy); void tscLockByThread(int64_t *lockedBy);
......
...@@ -35,12 +35,12 @@ extern "C" { ...@@ -35,12 +35,12 @@ extern "C" {
#define UTIL_TABLE_IS_CHILD_TABLE(metaInfo) \ #define UTIL_TABLE_IS_CHILD_TABLE(metaInfo) \
(((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_CHILD_TABLE)) (((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_CHILD_TABLE))
#define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo) \
(!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo)))
#define UTIL_TABLE_IS_TMP_TABLE(metaInfo) \ #define UTIL_TABLE_IS_TMP_TABLE(metaInfo) \
(((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_TEMP_TABLE)) (((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_TEMP_TABLE))
#define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo) \
(!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo) || UTIL_TABLE_IS_TMP_TABLE(metaInfo)))
#define UTIL_GET_VGROUPMAP(pSql) \ #define UTIL_GET_VGROUPMAP(pSql) \
(pSql->pTscObj->pClusterInfo->vgroupMap) (pSql->pTscObj->pClusterInfo->vgroupMap)
...@@ -256,7 +256,7 @@ void tscColumnListDestroy(SArray* pColList); ...@@ -256,7 +256,7 @@ void tscColumnListDestroy(SArray* pColList);
void tscColumnListCopy(SArray* dst, const SArray* src, uint64_t tableUid); void tscColumnListCopy(SArray* dst, const SArray* src, uint64_t tableUid);
void tscColumnListCopyAll(SArray* dst, const SArray* src); void tscColumnListCopyAll(SArray* dst, const SArray* src);
void convertQueryResult(SSqlRes* pRes, SQueryInfo* pQueryInfo, uint64_t objId, bool convertNchar); void convertQueryResult(SSqlRes* pRes, SQueryInfo* pQueryInfo, uint64_t objId, bool convertNchar, bool convertJson);
void tscDequoteAndTrimToken(SStrToken* pToken); void tscDequoteAndTrimToken(SStrToken* pToken);
void tscRmEscapeAndTrimToken(SStrToken* pToken); void tscRmEscapeAndTrimToken(SStrToken* pToken);
...@@ -264,7 +264,7 @@ int32_t tscValidateName(SStrToken* pToken, bool escapeEnabled, bool *dbIncluded) ...@@ -264,7 +264,7 @@ int32_t tscValidateName(SStrToken* pToken, bool escapeEnabled, bool *dbIncluded)
void tscIncStreamExecutionCount(void* pStream); void tscIncStreamExecutionCount(void* pStream);
bool tscValidateColumnId(STableMetaInfo* pTableMetaInfo, int32_t colId, int32_t numOfParams); bool tscValidateColumnId(STableMetaInfo* pTableMetaInfo, int32_t colId);
// get starter position of metric query condition (query on tags) in SSqlCmd.payload // get starter position of metric query condition (query on tags) in SSqlCmd.payload
SCond* tsGetSTableQueryCond(STagCond* pCond, uint64_t uid); SCond* tsGetSTableQueryCond(STagCond* pCond, uint64_t uid);
...@@ -364,7 +364,7 @@ bool subAndCheckDone(SSqlObj *pSql, SSqlObj *pParentSql, int idx); ...@@ -364,7 +364,7 @@ bool subAndCheckDone(SSqlObj *pSql, SSqlObj *pParentSql, int idx);
bool tscSetSqlOwner(SSqlObj* pSql); bool tscSetSqlOwner(SSqlObj* pSql);
void tscClearSqlOwner(SSqlObj* pSql); void tscClearSqlOwner(SSqlObj* pSql);
int32_t doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize); int32_t doScalarExprCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize);
char* serializeTagData(STagData* pTagData, char* pMsg); char* serializeTagData(STagData* pTagData, char* pMsg);
int32_t copyTagData(STagData* dst, const STagData* src); int32_t copyTagData(STagData* dst, const STagData* src);
...@@ -394,6 +394,9 @@ void tscRemoveCachedTableMeta(STableMetaInfo* pTableMetaInfo, uint64_t id); ...@@ -394,6 +394,9 @@ void tscRemoveCachedTableMeta(STableMetaInfo* pTableMetaInfo, uint64_t id);
char* cloneCurrentDBName(SSqlObj* pSql); char* cloneCurrentDBName(SSqlObj* pSql);
int parseJsontoTagData(char* json, SKVRowBuilder* kvRowBuilder, char* errMsg, int16_t startColId);
int8_t jsonType2DbType(double data, int jsonType);
void getJsonKey(SStrToken *t0);
char* cloneCurrentDBName(SSqlObj* pSql); char* cloneCurrentDBName(SSqlObj* pSql);
......
...@@ -115,8 +115,9 @@ typedef struct SParsedDataColInfo { ...@@ -115,8 +115,9 @@ typedef struct SParsedDataColInfo {
int16_t numOfCols; int16_t numOfCols;
int16_t numOfBound; int16_t numOfBound;
uint16_t flen; // TODO: get from STSchema uint16_t flen; // TODO: get from STSchema
  uint16_t allNullLen;      // TODO: get from STSchema   uint16_t allNullLen;      // TODO: get from STSchema (based on SDataRow)
uint16_t extendedVarLen; uint16_t extendedVarLen;
  uint16_t boundNullLen;    // length of the bound columns when all values are NULL (excluding the VarDataOffsetT/SColIdx part)
int32_t * boundedColumns; // bound column idx according to schema int32_t * boundedColumns; // bound column idx according to schema
SBoundColumn * cols; SBoundColumn * cols;
SBoundIdxInfo *colIdxInfo; SBoundIdxInfo *colIdxInfo;
...@@ -132,7 +133,7 @@ typedef struct { ...@@ -132,7 +133,7 @@ typedef struct {
typedef struct { typedef struct {
uint8_t memRowType; // default is 0, that is SDataRow uint8_t memRowType; // default is 0, that is SDataRow
uint8_t compareStat; // 0 no need, 1 need compare uint8_t compareStat; // 0 no need, 1 need compare
TDRowTLenT kvRowInitLen; int32_t rowSize;
SMemRowInfo *rowInfo; SMemRowInfo *rowInfo;
} SMemRowBuilder; } SMemRowBuilder;
...@@ -150,8 +151,7 @@ typedef struct { ...@@ -150,8 +151,7 @@ typedef struct {
int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec); int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec);
int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint32_t nCols, uint32_t nBoundCols, int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, SParsedDataColInfo *pColInfo);
int32_t allNullLen);
void destroyMemRowBuilder(SMemRowBuilder *pBuilder); void destroyMemRowBuilder(SMemRowBuilder *pBuilder);
/** /**
...@@ -453,7 +453,7 @@ void tscRestoreFuncForSTableQuery(SQueryInfo *pQueryInfo); ...@@ -453,7 +453,7 @@ void tscRestoreFuncForSTableQuery(SQueryInfo *pQueryInfo);
int32_t tscCreateResPointerInfo(SSqlRes *pRes, SQueryInfo *pQueryInfo); int32_t tscCreateResPointerInfo(SSqlRes *pRes, SQueryInfo *pQueryInfo);
void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo, bool converted); void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo, bool converted);
void tscSetResRawPtrRv(SSqlRes* pRes, SQueryInfo* pQueryInfo, SSDataBlock* pBlock, bool convertNchar); void tscSetResRawPtrRv(SSqlRes* pRes, SQueryInfo* pQueryInfo, SSDataBlock* pBlock, bool convertNchar, bool convertJson);
void handleDownstreamOperator(SSqlObj** pSqlList, int32_t numOfUpstream, SQueryInfo* px, SSqlObj* pParent); void handleDownstreamOperator(SSqlObj** pSqlList, int32_t numOfUpstream, SQueryInfo* px, SSqlObj* pParent);
void destroyTableNameList(SInsertStatementParam* pInsertParam); void destroyTableNameList(SInsertStatementParam* pInsertParam);
...@@ -499,6 +499,8 @@ bool tscHasReachLimitation(SQueryInfo *pQueryInfo, SSqlRes *pRes); ...@@ -499,6 +499,8 @@ bool tscHasReachLimitation(SQueryInfo *pQueryInfo, SSqlRes *pRes);
void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32_t numOfCols); void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32_t numOfCols);
char *tscGetErrorMsgPayload(SSqlCmd *pCmd); char *tscGetErrorMsgPayload(SSqlCmd *pCmd);
int32_t tscGetErrorMsgLength(SSqlCmd* pCmd);
int32_t tscErrorMsgWithCode(int32_t code, char* dstBuffer, const char* errMsg, const char* sql); int32_t tscErrorMsgWithCode(int32_t code, char* dstBuffer, const char* errMsg, const char* sql);
int32_t tscInvalidOperationMsg(char *msg, const char *additionalInfo, const char *sql); int32_t tscInvalidOperationMsg(char *msg, const char *additionalInfo, const char *sql);
...@@ -531,16 +533,6 @@ static FORCE_INLINE int32_t getExtendedRowSize(STableDataBlocks *pBlock) { ...@@ -531,16 +533,6 @@ static FORCE_INLINE int32_t getExtendedRowSize(STableDataBlocks *pBlock) {
return pBlock->rowSize + TD_MEM_ROW_DATA_HEAD_SIZE + pBlock->boundColumnInfo.extendedVarLen; return pBlock->rowSize + TD_MEM_ROW_DATA_HEAD_SIZE + pBlock->boundColumnInfo.extendedVarLen;
} }
static FORCE_INLINE void checkAndConvertMemRow(SMemRow row, int32_t dataLen, int32_t kvLen) {
if (isDataRow(row)) {
if (kvLen < (dataLen * KVRatioConvert)) {
memRowSetConvert(row);
}
} else if (kvLen > dataLen) {
memRowSetConvert(row);
}
}
static FORCE_INLINE void initSMemRow(SMemRow row, uint8_t memRowType, STableDataBlocks *pBlock, int16_t nBoundCols) { static FORCE_INLINE void initSMemRow(SMemRow row, uint8_t memRowType, STableDataBlocks *pBlock, int16_t nBoundCols) {
memRowSetType(row, memRowType); memRowSetType(row, memRowType);
if (isDataRowT(memRowType)) { if (isDataRowT(memRowType)) {
...@@ -640,8 +632,7 @@ static uint8_t TRUE_VALUE = (uint8_t)TSDB_TRUE; ...@@ -640,8 +632,7 @@ static uint8_t TRUE_VALUE = (uint8_t)TSDB_TRUE;
static uint8_t FALSE_VALUE = (uint8_t)TSDB_FALSE; static uint8_t FALSE_VALUE = (uint8_t)TSDB_FALSE;
static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pToken, SMemRow row, char *msg, char **str, static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pToken, SMemRow row, char *msg, char **str,
bool primaryKey, int16_t timePrec, int32_t toffset, int16_t colId, bool primaryKey, int16_t timePrec, int32_t toffset, int16_t colId) {
int32_t *dataLen, int32_t *kvLen, uint8_t compareStat) {
int64_t iv; int64_t iv;
int32_t ret; int32_t ret;
char * endptr = NULL; char * endptr = NULL;
...@@ -653,26 +644,22 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok ...@@ -653,26 +644,22 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok
switch (pSchema->type) { switch (pSchema->type) {
case TSDB_DATA_TYPE_BOOL: { // bool case TSDB_DATA_TYPE_BOOL: { // bool
if (isNullStr(pToken)) { if (isNullStr(pToken)) {
tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, tdAppendMemRowColVal(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset);
compareStat);
} else { } else {
if ((pToken->type == TK_BOOL || pToken->type == TK_STRING) && (pToken->n != 0)) { if ((pToken->type == TK_BOOL || pToken->type == TK_STRING) && (pToken->n != 0)) {
if (strncmp(pToken->z, "true", pToken->n) == 0) { if (strncmp(pToken->z, "true", pToken->n) == 0) {
tscAppendMemRowColValEx(row, &TRUE_VALUE, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); tdAppendMemRowColVal(row, &TRUE_VALUE, true, colId, pSchema->type, toffset);
} else if (strncmp(pToken->z, "false", pToken->n) == 0) { } else if (strncmp(pToken->z, "false", pToken->n) == 0) {
tscAppendMemRowColValEx(row, &FALSE_VALUE, true, colId, pSchema->type, toffset, dataLen, kvLen, tdAppendMemRowColVal(row, &FALSE_VALUE, true, colId, pSchema->type, toffset);
compareStat);
} else { } else {
return tscSQLSyntaxErrMsg(msg, "invalid bool data", pToken->z); return tscSQLSyntaxErrMsg(msg, "invalid bool data", pToken->z);
} }
} else if (pToken->type == TK_INTEGER) { } else if (pToken->type == TK_INTEGER) {
iv = strtoll(pToken->z, NULL, 10); iv = strtoll(pToken->z, NULL, 10);
tscAppendMemRowColValEx(row, ((iv == 0) ? &FALSE_VALUE : &TRUE_VALUE), true, colId, pSchema->type, toffset, tdAppendMemRowColVal(row, ((iv == 0) ? &FALSE_VALUE : &TRUE_VALUE), true, colId, pSchema->type, toffset);
dataLen, kvLen, compareStat);
} else if (pToken->type == TK_FLOAT) { } else if (pToken->type == TK_FLOAT) {
double dv = strtod(pToken->z, NULL); double dv = strtod(pToken->z, NULL);
tscAppendMemRowColValEx(row, ((dv == 0) ? &FALSE_VALUE : &TRUE_VALUE), true, colId, pSchema->type, toffset, tdAppendMemRowColVal(row, ((dv == 0) ? &FALSE_VALUE : &TRUE_VALUE), true, colId, pSchema->type, toffset);
dataLen, kvLen, compareStat);
} else { } else {
return tscInvalidOperationMsg(msg, "invalid bool data", pToken->z); return tscInvalidOperationMsg(msg, "invalid bool data", pToken->z);
} }
...@@ -682,8 +669,7 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok ...@@ -682,8 +669,7 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok
case TSDB_DATA_TYPE_TINYINT: case TSDB_DATA_TYPE_TINYINT:
if (isNullStr(pToken)) { if (isNullStr(pToken)) {
tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, tdAppendMemRowColVal(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset);
compareStat);
} else { } else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
if (ret != TSDB_CODE_SUCCESS) { if (ret != TSDB_CODE_SUCCESS) {
...@@ -693,15 +679,14 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok ...@@ -693,15 +679,14 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok
} }
uint8_t tmpVal = (uint8_t)iv; uint8_t tmpVal = (uint8_t)iv;
tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); tdAppendMemRowColVal(row, &tmpVal, true, colId, pSchema->type, toffset);
} }
break; break;
case TSDB_DATA_TYPE_UTINYINT: case TSDB_DATA_TYPE_UTINYINT:
if (isNullStr(pToken)) { if (isNullStr(pToken)) {
tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, tdAppendMemRowColVal(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset);
compareStat);
} else { } else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
if (ret != TSDB_CODE_SUCCESS) { if (ret != TSDB_CODE_SUCCESS) {
...@@ -711,15 +696,14 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok ...@@ -711,15 +696,14 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok
} }
uint8_t tmpVal = (uint8_t)iv; uint8_t tmpVal = (uint8_t)iv;
tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); tdAppendMemRowColVal(row, &tmpVal, true, colId, pSchema->type, toffset);
} }
break; break;
case TSDB_DATA_TYPE_SMALLINT: case TSDB_DATA_TYPE_SMALLINT:
if (isNullStr(pToken)) { if (isNullStr(pToken)) {
tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, tdAppendMemRowColVal(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset);
compareStat);
} else { } else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
if (ret != TSDB_CODE_SUCCESS) { if (ret != TSDB_CODE_SUCCESS) {
...@@ -729,15 +713,14 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok ...@@ -729,15 +713,14 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok
} }
int16_t tmpVal = (int16_t)iv; int16_t tmpVal = (int16_t)iv;
tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); tdAppendMemRowColVal(row, &tmpVal, true, colId, pSchema->type, toffset);
} }
break; break;
case TSDB_DATA_TYPE_USMALLINT: case TSDB_DATA_TYPE_USMALLINT:
if (isNullStr(pToken)) { if (isNullStr(pToken)) {
tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, tdAppendMemRowColVal(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset);
compareStat);
} else { } else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
if (ret != TSDB_CODE_SUCCESS) { if (ret != TSDB_CODE_SUCCESS) {
...@@ -747,15 +730,14 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok ...@@ -747,15 +730,14 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok
} }
uint16_t tmpVal = (uint16_t)iv; uint16_t tmpVal = (uint16_t)iv;
tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); tdAppendMemRowColVal(row, &tmpVal, true, colId, pSchema->type, toffset);
} }
break; break;
case TSDB_DATA_TYPE_INT: case TSDB_DATA_TYPE_INT:
if (isNullStr(pToken)) { if (isNullStr(pToken)) {
tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, tdAppendMemRowColVal(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset);
compareStat);
} else { } else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
if (ret != TSDB_CODE_SUCCESS) { if (ret != TSDB_CODE_SUCCESS) {
...@@ -765,15 +747,14 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok ...@@ -765,15 +747,14 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok
} }
int32_t tmpVal = (int32_t)iv; int32_t tmpVal = (int32_t)iv;
tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); tdAppendMemRowColVal(row, &tmpVal, true, colId, pSchema->type, toffset);
} }
break; break;
case TSDB_DATA_TYPE_UINT: case TSDB_DATA_TYPE_UINT:
if (isNullStr(pToken)) { if (isNullStr(pToken)) {
tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, tdAppendMemRowColVal(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset);
compareStat);
} else { } else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
if (ret != TSDB_CODE_SUCCESS) { if (ret != TSDB_CODE_SUCCESS) {
...@@ -783,15 +764,14 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok ...@@ -783,15 +764,14 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok
} }
uint32_t tmpVal = (uint32_t)iv; uint32_t tmpVal = (uint32_t)iv;
tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); tdAppendMemRowColVal(row, &tmpVal, true, colId, pSchema->type, toffset);
} }
break; break;
case TSDB_DATA_TYPE_BIGINT: case TSDB_DATA_TYPE_BIGINT:
if (isNullStr(pToken)) { if (isNullStr(pToken)) {
tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, tdAppendMemRowColVal(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset);
compareStat);
} else { } else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
if (ret != TSDB_CODE_SUCCESS) { if (ret != TSDB_CODE_SUCCESS) {
...@@ -800,14 +780,13 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok ...@@ -800,14 +780,13 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok
return tscInvalidOperationMsg(msg, "bigint data overflow", pToken->z); return tscInvalidOperationMsg(msg, "bigint data overflow", pToken->z);
} }
tscAppendMemRowColValEx(row, &iv, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); tdAppendMemRowColVal(row, &iv, true, colId, pSchema->type, toffset);
} }
break; break;
case TSDB_DATA_TYPE_UBIGINT: case TSDB_DATA_TYPE_UBIGINT:
if (isNullStr(pToken)) { if (isNullStr(pToken)) {
tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, tdAppendMemRowColVal(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset);
compareStat);
} else { } else {
ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
if (ret != TSDB_CODE_SUCCESS) { if (ret != TSDB_CODE_SUCCESS) {
...@@ -817,14 +796,13 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok ...@@ -817,14 +796,13 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok
} }
uint64_t tmpVal = (uint64_t)iv; uint64_t tmpVal = (uint64_t)iv;
tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); tdAppendMemRowColVal(row, &tmpVal, true, colId, pSchema->type, toffset);
} }
break; break;
case TSDB_DATA_TYPE_FLOAT: case TSDB_DATA_TYPE_FLOAT:
if (isNullStr(pToken)) { if (isNullStr(pToken)) {
tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, tdAppendMemRowColVal(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset);
compareStat);
} else { } else {
double dv; double dv;
if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) { if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) {
...@@ -837,14 +815,13 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok ...@@ -837,14 +815,13 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok
} }
float tmpVal = (float)dv; float tmpVal = (float)dv;
tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); tdAppendMemRowColVal(row, &tmpVal, true, colId, pSchema->type, toffset);
} }
break; break;
case TSDB_DATA_TYPE_DOUBLE: case TSDB_DATA_TYPE_DOUBLE:
if (isNullStr(pToken)) { if (isNullStr(pToken)) {
tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, tdAppendMemRowColVal(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset);
compareStat);
} else { } else {
double dv; double dv;
if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) { if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) {
...@@ -855,15 +832,14 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok ...@@ -855,15 +832,14 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok
return tscInvalidOperationMsg(msg, "illegal double data", pToken->z); return tscInvalidOperationMsg(msg, "illegal double data", pToken->z);
} }
tscAppendMemRowColValEx(row, &dv, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); tdAppendMemRowColVal(row, &dv, true, colId, pSchema->type, toffset);
} }
break; break;
case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_BINARY:
      // binary data cannot be a null-terminated char string; otherwise the last char of the string is lost // binary data cannot be a null-terminated char string; otherwise the last char of the string is lost
if (pToken->type == TK_NULL) { if (pToken->type == TK_NULL) {
tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, tdAppendMemRowColVal(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset);
compareStat);
      } else {  // overly long values return an invalid SQL error rather than being truncated automatically } else {  // overly long values return an invalid SQL error rather than being truncated automatically
if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) { // todo refactor if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) { // todo refactor
return tscInvalidOperationMsg(msg, "string data overflow", pToken->z); return tscInvalidOperationMsg(msg, "string data overflow", pToken->z);
...@@ -871,14 +847,13 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok ...@@ -871,14 +847,13 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok
// STR_WITH_SIZE_TO_VARSTR(payload, pToken->z, pToken->n); // STR_WITH_SIZE_TO_VARSTR(payload, pToken->z, pToken->n);
char *rowEnd = memRowEnd(row); char *rowEnd = memRowEnd(row);
STR_WITH_SIZE_TO_VARSTR(rowEnd, pToken->z, pToken->n); STR_WITH_SIZE_TO_VARSTR(rowEnd, pToken->z, pToken->n);
tscAppendMemRowColValEx(row, rowEnd, false, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); tdAppendMemRowColVal(row, rowEnd, false, colId, pSchema->type, toffset);
} }
break; break;
case TSDB_DATA_TYPE_NCHAR: case TSDB_DATA_TYPE_NCHAR:
if (pToken->type == TK_NULL) { if (pToken->type == TK_NULL) {
tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, tdAppendMemRowColVal(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset);
compareStat);
} else { } else {
        // if the converted output length exceeds pColumnModel->bytes, return error: 'Argument list too long' // if the converted output length exceeds pColumnModel->bytes, return error: 'Argument list too long'
int32_t output = 0; int32_t output = 0;
...@@ -890,7 +865,7 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok ...@@ -890,7 +865,7 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok
return tscInvalidOperationMsg(msg, buf, pToken->z); return tscInvalidOperationMsg(msg, buf, pToken->z);
} }
varDataSetLen(rowEnd, output); varDataSetLen(rowEnd, output);
tscAppendMemRowColValEx(row, rowEnd, false, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); tdAppendMemRowColVal(row, rowEnd, false, colId, pSchema->type, toffset);
} }
break; break;
...@@ -899,17 +874,16 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok ...@@ -899,17 +874,16 @@ static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pTok
if (primaryKey) { if (primaryKey) {
// When building SKVRow primaryKey, we should not skip even with NULL value. // When building SKVRow primaryKey, we should not skip even with NULL value.
int64_t tmpVal = 0; int64_t tmpVal = 0;
tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); tdAppendMemRowColVal(row, &tmpVal, true, colId, pSchema->type, toffset);
} else { } else {
tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, tdAppendMemRowColVal(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset);
compareStat);
} }
} else { } else {
int64_t tmpVal; int64_t tmpVal;
if (tsParseTime(pToken, &tmpVal, str, msg, timePrec) != TSDB_CODE_SUCCESS) { if (tsParseTime(pToken, &tmpVal, str, msg, timePrec) != TSDB_CODE_SUCCESS) {
return tscInvalidOperationMsg(msg, "invalid timestamp", pToken->z); return tscInvalidOperationMsg(msg, "invalid timestamp", pToken->z);
} }
tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); tdAppendMemRowColVal(row, &tmpVal, true, colId, pSchema->type, toffset);
} }
break; break;
......
...@@ -547,6 +547,11 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn ...@@ -547,6 +547,11 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn
jniFromNCharToByteArray(env, (char *)row[i], length[i])); jniFromNCharToByteArray(env, (char *)row[i], length[i]));
break; break;
} }
case TSDB_DATA_TYPE_JSON: {
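        // JSON values are passed to the Java layer as a byte array, the same way as NCHAR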
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetByteArrayFp, i,
jniFromNCharToByteArray(env, (char *)row[i], length[i]));
break;
}
case TSDB_DATA_TYPE_TIMESTAMP: { case TSDB_DATA_TYPE_TIMESTAMP: {
int precision = taos_result_precision(result); int precision = taos_result_precision(result);
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetTimestampFp, i, (jlong) * ((int64_t *)row[i]), precision); (*env)->CallVoidMethod(env, rowobj, g_rowdataSetTimestampFp, i, (jlong) * ((int64_t *)row[i]), precision);
......
...@@ -608,6 +608,7 @@ static void doMergeResultImpl(SMultiwayMergeInfo* pInfo, SQLFunctionCtx *pCtx, i ...@@ -608,6 +608,7 @@ static void doMergeResultImpl(SMultiwayMergeInfo* pInfo, SQLFunctionCtx *pCtx, i
SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1); SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1);
doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_MERGE); doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_MERGE);
} else { } else {
assert(!TSDB_FUNC_IS_SCALAR(functionId));
aAggs[functionId].mergeFunc(&pCtx[j]); aAggs[functionId].mergeFunc(&pCtx[j]);
} }
} }
...@@ -624,6 +625,7 @@ static void doFinalizeResultImpl(SMultiwayMergeInfo* pInfo, SQLFunctionCtx *pCtx ...@@ -624,6 +625,7 @@ static void doFinalizeResultImpl(SMultiwayMergeInfo* pInfo, SQLFunctionCtx *pCtx
SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1); SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1);
doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_FINALIZE); doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_FINALIZE);
} else { } else {
assert(!TSDB_FUNC_IS_SCALAR(functionId));
aAggs[functionId].xFinalize(&pCtx[j]); aAggs[functionId].xFinalize(&pCtx[j]);
} }
} }
...@@ -663,8 +665,10 @@ static void doExecuteFinalMerge(SOperatorInfo* pOperator, int32_t numOfExpr, SSD ...@@ -663,8 +665,10 @@ static void doExecuteFinalMerge(SOperatorInfo* pOperator, int32_t numOfExpr, SSD
if (pCtx[j].functionId < 0) { if (pCtx[j].functionId < 0) {
continue; continue;
} }
{
aAggs[pCtx[j].functionId].init(&pCtx[j], pCtx[j].resultInfo); assert(!TSDB_FUNC_IS_SCALAR(pCtx[j].functionId));
aAggs[pCtx[j].functionId].init(&pCtx[j], pCtx[j].resultInfo);
}
} }
doMergeResultImpl(pInfo, pCtx, numOfExpr, i, addrPtr); doMergeResultImpl(pInfo, pCtx, numOfExpr, i, addrPtr);
...@@ -706,12 +710,12 @@ SGlobalMerger* tscInitResObjForLocalQuery(int32_t numOfRes, int32_t rowLen, uint ...@@ -706,12 +710,12 @@ SGlobalMerger* tscInitResObjForLocalQuery(int32_t numOfRes, int32_t rowLen, uint
} }
// todo remove it // todo remove it
int32_t doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize) { int32_t doScalarExprCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize) {
int32_t maxRowSize = MAX(rowSize, finalRowSize); int32_t maxRowSize = MAX(rowSize, finalRowSize);
char* pbuf = calloc(1, (size_t)(pOutput->num * maxRowSize)); char* pbuf = calloc(1, (size_t)(pOutput->num * maxRowSize));
size_t size = tscNumOfFields(pQueryInfo); size_t size = tscNumOfFields(pQueryInfo);
SArithmeticSupport arithSup = {0}; SScalarExprSupport arithSup = {0};
// todo refactor // todo refactor
arithSup.offset = 0; arithSup.offset = 0;
...@@ -732,7 +736,10 @@ int32_t doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_ ...@@ -732,7 +736,10 @@ int32_t doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_
// calculate the result from several other columns // calculate the result from several other columns
if (pSup->pExpr->pExpr != NULL) { if (pSup->pExpr->pExpr != NULL) {
arithSup.pExprInfo = pSup->pExpr; arithSup.pExprInfo = pSup->pExpr;
arithmeticTreeTraverse(arithSup.pExprInfo->pExpr, (int32_t) pOutput->num, pbuf + pOutput->num*offset, &arithSup, TSDB_ORDER_ASC, getArithmeticInputSrc); tExprOperandInfo output;
output.data = pbuf + pOutput->num*offset;
exprTreeNodeTraverse(arithSup.pExprInfo->pExpr, (int32_t)pOutput->num, &output, &arithSup, TSDB_ORDER_ASC,
getScalarExprInputSrc);
} else { } else {
SExprInfo* pExpr = pSup->pExpr; SExprInfo* pExpr = pSup->pExpr;
memcpy(pbuf + pOutput->num * offset, pExpr->base.offset * pOutput->num + pOutput->data, (size_t)(pExpr->base.resBytes * pOutput->num)); memcpy(pbuf + pOutput->num * offset, pExpr->base.offset * pOutput->num + pOutput->data, (size_t)(pExpr->base.resBytes * pOutput->num));
...@@ -904,8 +911,10 @@ SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) { ...@@ -904,8 +911,10 @@ SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) {
clearOutputBuf(&pAggInfo->binfo, &pAggInfo->bufCapacity); clearOutputBuf(&pAggInfo->binfo, &pAggInfo->bufCapacity);
continue; continue;
} }
{
aAggs[pCtx->functionId].init(pCtx, pCtx->resultInfo); assert(!TSDB_FUNC_IS_SCALAR(pCtx->functionId));
aAggs[pCtx->functionId].init(pCtx, pCtx->resultInfo);
}
} }
} }
......
...@@ -85,16 +85,15 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) { ...@@ -85,16 +85,15 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 1); pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 1);
dst = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 1) * totalNumOfRows + pField->bytes * i; dst = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 1) * totalNumOfRows + pField->bytes * i;
STR_WITH_MAXSIZE_TO_VARSTR(dst, type, pField->bytes); STR_WITH_MAXSIZE_TO_VARSTR(dst, type, pField->bytes);
int32_t bytes = pSchema[i].bytes; int32_t bytes = pSchema[i].bytes;
if (pSchema[i].type == TSDB_DATA_TYPE_BINARY || pSchema[i].type == TSDB_DATA_TYPE_NCHAR) { if (pSchema[i].type == TSDB_DATA_TYPE_BINARY){
bytes -= VARSTR_HEADER_SIZE; bytes -= VARSTR_HEADER_SIZE;
}
if (pSchema[i].type == TSDB_DATA_TYPE_NCHAR) { else if(pSchema[i].type == TSDB_DATA_TYPE_NCHAR || pSchema[i].type == TSDB_DATA_TYPE_JSON) {
bytes = bytes / TSDB_NCHAR_SIZE; bytes -= VARSTR_HEADER_SIZE;
} bytes = bytes / TSDB_NCHAR_SIZE;
} }
pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 2); pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 2);
...@@ -222,7 +221,7 @@ static int32_t tscGetNthFieldResult(TAOS_ROW row, TAOS_FIELD* fields, int *lengt ...@@ -222,7 +221,7 @@ static int32_t tscGetNthFieldResult(TAOS_ROW row, TAOS_FIELD* fields, int *lengt
return -1; return -1;
} }
uint8_t type = fields[idx].type; uint8_t type = fields[idx].type;
int32_t length = lengths[idx]; int32_t length = lengths[idx];
switch (type) { switch (type) {
case TSDB_DATA_TYPE_BOOL: case TSDB_DATA_TYPE_BOOL:
...@@ -248,6 +247,7 @@ static int32_t tscGetNthFieldResult(TAOS_ROW row, TAOS_FIELD* fields, int *lengt ...@@ -248,6 +247,7 @@ static int32_t tscGetNthFieldResult(TAOS_ROW row, TAOS_FIELD* fields, int *lengt
break; break;
case TSDB_DATA_TYPE_NCHAR: case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_JSON:
memcpy(result, val, length); memcpy(result, val, length);
break; break;
case TSDB_DATA_TYPE_TIMESTAMP: case TSDB_DATA_TYPE_TIMESTAMP:
...@@ -636,9 +636,9 @@ static int32_t tscRebuildDDLForNormalTable(SSqlObj *pSql, const char *tableName, ...@@ -636,9 +636,9 @@ static int32_t tscRebuildDDLForNormalTable(SSqlObj *pSql, const char *tableName,
if (type == TSDB_DATA_TYPE_NCHAR) { if (type == TSDB_DATA_TYPE_NCHAR) {
bytes = bytes/TSDB_NCHAR_SIZE; bytes = bytes/TSDB_NCHAR_SIZE;
} }
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name, tDataTypes[pSchema[i].type].name, bytes); snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "`%s` %s(%d),", pSchema[i].name, tDataTypes[pSchema[i].type].name, bytes);
} else { } else {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypes[pSchema[i].type].name); snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "`%s` %s,", pSchema[i].name, tDataTypes[pSchema[i].type].name);
} }
} }
sprintf(result + strlen(result) - 1, "%s", ")"); sprintf(result + strlen(result) - 1, "%s", ")");
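  // note: column names in the rebuilt DDL are now wrapped in backticks, e.g. a column named "val" is emitted as `val` <type> instead of val <type> (illustrative)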
...@@ -663,9 +663,9 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName, ...@@ -663,9 +663,9 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName,
if (type == TSDB_DATA_TYPE_NCHAR) { if (type == TSDB_DATA_TYPE_NCHAR) {
bytes = bytes/TSDB_NCHAR_SIZE; bytes = bytes/TSDB_NCHAR_SIZE;
} }
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result),"%s %s(%d),", pSchema[i].name,tDataTypes[pSchema[i].type].name, bytes); snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result),"`%s` %s(%d),", pSchema[i].name,tDataTypes[pSchema[i].type].name, bytes);
} else { } else {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypes[type].name); snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "`%s` %s,", pSchema[i].name, tDataTypes[type].name);
} }
} }
snprintf(result + strlen(result) - 1, TSDB_MAX_BINARY_LEN - strlen(result), "%s %s", ")", "TAGS ("); snprintf(result + strlen(result) - 1, TSDB_MAX_BINARY_LEN - strlen(result), "%s %s", ")", "TAGS (");
...@@ -677,9 +677,9 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName, ...@@ -677,9 +677,9 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName,
if (type == TSDB_DATA_TYPE_NCHAR) { if (type == TSDB_DATA_TYPE_NCHAR) {
bytes = bytes/TSDB_NCHAR_SIZE; bytes = bytes/TSDB_NCHAR_SIZE;
} }
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name,tDataTypes[pSchema[i].type].name, bytes); snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "`%s` %s(%d),", pSchema[i].name,tDataTypes[pSchema[i].type].name, bytes);
} else { } else {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypes[type].name); snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "`%s` %s,", pSchema[i].name, tDataTypes[type].name);
} }
} }
sprintf(result + strlen(result) - 1, "%s", ")"); sprintf(result + strlen(result) - 1, "%s", ")");
......
...@@ -41,9 +41,8 @@ enum { ...@@ -41,9 +41,8 @@ enum {
static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t *numOfRows); static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t *numOfRows);
static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDataColInfo *pColInfo, SSchema *pSchema, static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDataColInfo *pColInfo, SSchema *pSchema,
char *str, char **end); char *str, char **end);
int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint32_t nCols, uint32_t nBoundCols, int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, SParsedDataColInfo *pColInfo) {
int32_t allNullLen) { ASSERT(nRows >= 0 && pColInfo->numOfCols > 0 && (pColInfo->numOfBound <= pColInfo->numOfCols));
ASSERT(nRows >= 0 && nCols > 0 && (nBoundCols <= nCols));
if (nRows > 0) { if (nRows > 0) {
// already init(bind multiple rows by single column) // already init(bind multiple rows by single column)
if (pBuilder->compareStat == ROW_COMPARE_NEED && (pBuilder->rowInfo != NULL)) { if (pBuilder->compareStat == ROW_COMPARE_NEED && (pBuilder->rowInfo != NULL)) {
...@@ -51,41 +50,12 @@ int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint3 ...@@ -51,41 +50,12 @@ int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint3
} }
} }
// default compareStat is ROW_COMPARE_NO_NEED uint32_t dataLen = TD_MEM_ROW_DATA_HEAD_SIZE + pColInfo->allNullLen;
if (nBoundCols == 0) { // file input uint32_t kvLen = TD_MEM_ROW_KV_HEAD_SIZE + pColInfo->numOfBound * sizeof(SColIdx) + pColInfo->boundNullLen;
pBuilder->memRowType = SMEM_ROW_DATA; if (isUtilizeKVRow(kvLen, dataLen)) {
return TSDB_CODE_SUCCESS; pBuilder->memRowType = SMEM_ROW_KV;
} else { } else {
float boundRatio = ((float)nBoundCols / (float)nCols); pBuilder->memRowType = SMEM_ROW_DATA;
if (boundRatio < KVRatioKV) {
pBuilder->memRowType = SMEM_ROW_KV;
return TSDB_CODE_SUCCESS;
} else if (boundRatio > KVRatioData) {
pBuilder->memRowType = SMEM_ROW_DATA;
return TSDB_CODE_SUCCESS;
}
pBuilder->compareStat = ROW_COMPARE_NEED;
if (boundRatio < KVRatioPredict) {
pBuilder->memRowType = SMEM_ROW_KV;
} else {
pBuilder->memRowType = SMEM_ROW_DATA;
}
}
pBuilder->kvRowInitLen = TD_MEM_ROW_KV_HEAD_SIZE + nBoundCols * sizeof(SColIdx);
if (nRows > 0) {
pBuilder->rowInfo = tcalloc(nRows, sizeof(SMemRowInfo));
if (pBuilder->rowInfo == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
for (int i = 0; i < nRows; ++i) {
(pBuilder->rowInfo + i)->dataLen = TD_MEM_ROW_DATA_HEAD_SIZE + allNullLen;
(pBuilder->rowInfo + i)->kvLen = pBuilder->kvRowInitLen;
}
} }
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
...@@ -385,6 +355,19 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha ...@@ -385,6 +355,19 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha
} }
break; break;
case TSDB_DATA_TYPE_JSON:
if (pToken->n >= pSchema->bytes) { // reserve 1 byte for select
return tscInvalidOperationMsg(msg, "json tag length too long", pToken->z);
}
if (pToken->type == TK_NULL) {
*(int8_t *)payload = TSDB_DATA_TINYINT_NULL;
} else if (pToken->type != TK_STRING){
return tscInvalidOperationMsg(msg, "invalid json data", pToken->z);
} else{
*((int8_t *)payload) = TSDB_DATA_JSON_PLACEHOLDER;
}
break;
case TSDB_DATA_TYPE_TIMESTAMP: { case TSDB_DATA_TYPE_TIMESTAMP: {
if (pToken->type == TK_NULL) { if (pToken->type == TK_NULL) {
if (primaryKey) { if (primaryKey) {
...@@ -455,8 +438,6 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i ...@@ -455,8 +438,6 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i
STableMeta * pTableMeta = pDataBlocks->pTableMeta; STableMeta * pTableMeta = pDataBlocks->pTableMeta;
SSchema * schema = tscGetTableSchema(pTableMeta); SSchema * schema = tscGetTableSchema(pTableMeta);
SMemRowBuilder * pBuilder = &pDataBlocks->rowBuilder; SMemRowBuilder * pBuilder = &pDataBlocks->rowBuilder;
int32_t dataLen = spd->allNullLen + TD_MEM_ROW_DATA_HEAD_SIZE;
int32_t kvLen = pBuilder->kvRowInitLen;
bool isParseBindParam = false; bool isParseBindParam = false;
initSMemRow(row, pBuilder->memRowType, pDataBlocks, spd->numOfBound); initSMemRow(row, pBuilder->memRowType, pDataBlocks, spd->numOfBound);
...@@ -533,8 +514,8 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i ...@@ -533,8 +514,8 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i
int16_t colId = -1; int16_t colId = -1;
tscGetMemRowAppendInfo(schema, pBuilder->memRowType, spd, i, &toffset, &colId); tscGetMemRowAppendInfo(schema, pBuilder->memRowType, spd, i, &toffset, &colId);
int32_t ret = tsParseOneColumnKV(pSchema, &sToken, row, pInsertParam->msg, str, isPrimaryKey, timePrec, toffset, int32_t ret =
colId, &dataLen, &kvLen, pBuilder->compareStat); tsParseOneColumnKV(pSchema, &sToken, row, pInsertParam->msg, str, isPrimaryKey, timePrec, toffset, colId);
if (ret != TSDB_CODE_SUCCESS) { if (ret != TSDB_CODE_SUCCESS) {
return ret; return ret;
} }
...@@ -549,13 +530,8 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i ...@@ -549,13 +530,8 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i
} }
if (!isParseBindParam) { if (!isParseBindParam) {
// 2. check and set convert flag // set the null value for the columns that do not assign values
if (pBuilder->compareStat == ROW_COMPARE_NEED) { if ((spd->numOfBound < spd->numOfCols) && isDataRow(row)) {
checkAndConvertMemRow(row, dataLen, kvLen);
}
// 3. set the null value for the columns that do not assign values
if ((spd->numOfBound < spd->numOfCols) && isDataRow(row) && !isNeedConvertRow(row)) {
SDataRow dataRow = memRowDataBody(row); SDataRow dataRow = memRowDataBody(row);
for (int32_t i = 0; i < spd->numOfCols; ++i) { for (int32_t i = 0; i < spd->numOfCols; ++i) {
if (spd->cols[i].valStat == VAL_STAT_NONE) { if (spd->cols[i].valStat == VAL_STAT_NONE) {
...@@ -565,7 +541,7 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i ...@@ -565,7 +541,7 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i
} }
} }
*len = getExtendedRowSize(pDataBlocks); *len = pBuilder->rowSize;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
...@@ -618,11 +594,11 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn ...@@ -618,11 +594,11 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn
int32_t extendedRowSize = getExtendedRowSize(pDataBlock); int32_t extendedRowSize = getExtendedRowSize(pDataBlock);
if (TSDB_CODE_SUCCESS != if (TSDB_CODE_SUCCESS != (code = initMemRowBuilder(&pDataBlock->rowBuilder, 0, &pDataBlock->boundColumnInfo))) {
(code = initMemRowBuilder(&pDataBlock->rowBuilder, 0, tinfo.numOfColumns, pDataBlock->boundColumnInfo.numOfBound,
pDataBlock->boundColumnInfo.allNullLen))) {
return code; return code;
} }
pDataBlock->rowBuilder.rowSize = extendedRowSize;
while (1) { while (1) {
index = 0; index = 0;
sToken = tStrGetToken(*str, &index, false); sToken = tStrGetToken(*str, &index, false);
...@@ -701,6 +677,7 @@ void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32 ...@@ -701,6 +677,7 @@ void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32
pColInfo->boundedColumns[i] = i; pColInfo->boundedColumns[i] = i;
} }
pColInfo->allNullLen += pColInfo->flen; pColInfo->allNullLen += pColInfo->flen;
pColInfo->boundNullLen = pColInfo->allNullLen; // default set allNullLen
pColInfo->extendedVarLen = (uint16_t)(nVar * sizeof(VarDataOffsetT)); pColInfo->extendedVarLen = (uint16_t)(nVar * sizeof(VarDataOffsetT));
} }
...@@ -1094,9 +1071,26 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC ...@@ -1094,9 +1071,26 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
return code; return code;
} }
tdAddColToKVRow(&kvRowBuilder, pSchema->colId, pSchema->type, tagVal); tdAddColToKVRow(&kvRowBuilder, pSchema->colId, pSchema->type, tagVal, false);
}
if(pSchema->type == TSDB_DATA_TYPE_JSON){
assert(spd.numOfBound == 1);
if(sToken.n > TSDB_MAX_JSON_TAGS_LEN/TSDB_NCHAR_SIZE){
tdDestroyKVRowBuilder(&kvRowBuilder);
tscDestroyBoundColumnInfo(&spd);
return tscSQLSyntaxErrMsg(pInsertParam->msg, "json tag too long", NULL);
}
char* json = strndup(sToken.z, sToken.n);
code = parseJsontoTagData(json, &kvRowBuilder, pInsertParam->msg, pTagSchema[spd.boundedColumns[0]].colId);
if (code != TSDB_CODE_SUCCESS) {
tdDestroyKVRowBuilder(&kvRowBuilder);
tscDestroyBoundColumnInfo(&spd);
tfree(json);
return code;
}
tfree(json);
}
}
tscDestroyBoundColumnInfo(&spd); tscDestroyBoundColumnInfo(&spd);
SKVRow row = tdGetKVRowFromBuilder(&kvRowBuilder); SKVRow row = tdGetKVRowFromBuilder(&kvRowBuilder);
...@@ -1110,7 +1104,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC ...@@ -1110,7 +1104,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
if (pInsertParam->tagData.dataLen <= 0){ if (pInsertParam->tagData.dataLen <= 0){
return tscSQLSyntaxErrMsg(pInsertParam->msg, "tag value expected", NULL); return tscSQLSyntaxErrMsg(pInsertParam->msg, "tag value expected", NULL);
} }
char* pTag = realloc(pInsertParam->tagData.data, pInsertParam->tagData.dataLen); char* pTag = realloc(pInsertParam->tagData.data, pInsertParam->tagData.dataLen);
if (pTag == NULL) { if (pTag == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY; return TSDB_CODE_TSC_OUT_OF_MEMORY;
...@@ -1224,6 +1218,7 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat ...@@ -1224,6 +1218,7 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat
int32_t nCols = pColInfo->numOfCols; int32_t nCols = pColInfo->numOfCols;
pColInfo->numOfBound = 0; pColInfo->numOfBound = 0;
pColInfo->boundNullLen = 0;
memset(pColInfo->boundedColumns, 0, sizeof(int32_t) * nCols); memset(pColInfo->boundedColumns, 0, sizeof(int32_t) * nCols);
for (int32_t i = 0; i < nCols; ++i) { for (int32_t i = 0; i < nCols; ++i) {
pColInfo->cols[i].valStat = VAL_STAT_NONE; pColInfo->cols[i].valStat = VAL_STAT_NONE;
...@@ -1281,6 +1276,17 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat ...@@ -1281,6 +1276,17 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat
pColInfo->cols[t].valStat = VAL_STAT_HAS; pColInfo->cols[t].valStat = VAL_STAT_HAS;
pColInfo->boundedColumns[pColInfo->numOfBound] = t; pColInfo->boundedColumns[pColInfo->numOfBound] = t;
++pColInfo->numOfBound; ++pColInfo->numOfBound;
switch (pSchema[t].type) {
case TSDB_DATA_TYPE_BINARY:
pColInfo->boundNullLen += (VARSTR_HEADER_SIZE + CHAR_BYTES);
break;
case TSDB_DATA_TYPE_NCHAR:
pColInfo->boundNullLen += (VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE);
break;
default:
pColInfo->boundNullLen += TYPE_BYTES[pSchema[t].type];
break;
}
findColumnIndex = true; findColumnIndex = true;
if (isOrdered && (lastColIdx > t)) { if (isOrdered && (lastColIdx > t)) {
isOrdered = false; isOrdered = false;
...@@ -1304,6 +1310,17 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat ...@@ -1304,6 +1310,17 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat
pColInfo->cols[t].valStat = VAL_STAT_HAS; pColInfo->cols[t].valStat = VAL_STAT_HAS;
pColInfo->boundedColumns[pColInfo->numOfBound] = t; pColInfo->boundedColumns[pColInfo->numOfBound] = t;
++pColInfo->numOfBound; ++pColInfo->numOfBound;
switch (pSchema[t].type) {
case TSDB_DATA_TYPE_BINARY:
pColInfo->boundNullLen += (VARSTR_HEADER_SIZE + CHAR_BYTES);
break;
case TSDB_DATA_TYPE_NCHAR:
pColInfo->boundNullLen += (VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE);
break;
default:
pColInfo->boundNullLen += TYPE_BYTES[pSchema[t].type];
break;
}
findColumnIndex = true; findColumnIndex = true;
if (isOrdered && (lastColIdx > t)) { if (isOrdered && (lastColIdx > t)) {
isOrdered = false; isOrdered = false;
...@@ -1754,13 +1771,18 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow ...@@ -1754,13 +1771,18 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
goto _error; goto _error;
} }
tscAllocateMemIfNeed(pTableDataBlock, getExtendedRowSize(pTableDataBlock), &maxRows); int32_t extendedRowSize = getExtendedRowSize(pTableDataBlock);
tscAllocateMemIfNeed(pTableDataBlock, extendedRowSize, &maxRows);
tokenBuf = calloc(1, TSDB_MAX_BYTES_PER_ROW); tokenBuf = calloc(1, TSDB_MAX_BYTES_PER_ROW);
if (tokenBuf == NULL) { if (tokenBuf == NULL) {
code = TSDB_CODE_TSC_OUT_OF_MEMORY; code = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error; goto _error;
} }
// insert from .csv means full and ordered columns, thus use SDataRow all the time
ASSERT(SMEM_ROW_DATA == pTableDataBlock->rowBuilder.memRowType);
pTableDataBlock->rowBuilder.rowSize = extendedRowSize;
while ((readLen = tgetline(&line, &n, fp)) != -1) { while ((readLen = tgetline(&line, &n, fp)) != -1) {
if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) {
line[--readLen] = 0; line[--readLen] = 0;
......
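For reference: the per-type null-length accounting added above appears in two identical switch blocks inside parseBoundColumns. A minimal sketch of how it could be factored out (hypothetical helper name, not part of this commit; it assumes the SSchema and TYPE_BYTES definitions from this repository):

/* Hypothetical helper, illustration only: bytes a NULL value of this column
 * occupies when the row is stored in KV format. */
static int32_t nullValueLenOf(const SSchema *pCol) {
  switch (pCol->type) {
    case TSDB_DATA_TYPE_BINARY:
      return VARSTR_HEADER_SIZE + CHAR_BYTES;       /* empty binary: var header + 1 byte  */
    case TSDB_DATA_TYPE_NCHAR:
      return VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE;  /* empty nchar: var header + 1 glyph  */
    default:
      return TYPE_BYTES[pCol->type];                /* fixed-size types                   */
  }
}
/* both call sites would then read: pColInfo->boundNullLen += nullValueLenOf(&pSchema[t]); */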
...@@ -125,8 +125,9 @@ static int32_t parseTelnetTimeStamp(TAOS_SML_KV **pTS, int *num_kvs, const char ...@@ -125,8 +125,9 @@ static int32_t parseTelnetTimeStamp(TAOS_SML_KV **pTS, int *num_kvs, const char
} }
tfree(value); tfree(value);
(*pTS)->key = tcalloc(sizeof(key), 1); (*pTS)->key = tcalloc(sizeof(key) + TS_ESCAPE_CHAR_SIZE, 1);
memcpy((*pTS)->key, key, sizeof(key)); memcpy((*pTS)->key, key, sizeof(key));
addEscapeCharToString((*pTS)->key, (int32_t)strlen(key));
*num_kvs += 1; *num_kvs += 1;
*index = cur + 1; *index = cur + 1;
......
...@@ -1533,6 +1533,41 @@ int stmtGenInsertStatement(SSqlObj* pSql, STscStmt* pStmt, const char* name, TAO ...@@ -1533,6 +1533,41 @@ int stmtGenInsertStatement(SSqlObj* pSql, STscStmt* pStmt, const char* name, TAO
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
int32_t stmtValidateValuesFields(SSqlCmd *pCmd, char * sql) {
int32_t loopCont = 1, index0 = 0, values = 0;
SStrToken sToken;
while (loopCont) {
sToken = tStrGetToken(sql, &index0, false);
if (sToken.n <= 0) {
return TSDB_CODE_SUCCESS;
}
switch (sToken.type) {
case TK_RP:
if (values) {
return TSDB_CODE_SUCCESS;
}
break;
case TK_VALUES:
values = 1;
break;
case TK_QUESTION:
case TK_LP:
break;
default:
if (values) {
tscError("only ? allowed in values");
return tscInvalidOperationMsg(pCmd->payload, "only ? allowed in values", NULL);
}
break;
}
}
return TSDB_CODE_SUCCESS;
}
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
// interface functions // interface functions
...@@ -1637,6 +1672,11 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) { ...@@ -1637,6 +1672,11 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
STMT_RET(ret); STMT_RET(ret);
} }
ret = stmtValidateValuesFields(&pSql->cmd, pSql->sqlstr);
if (ret != TSDB_CODE_SUCCESS) {
STMT_RET(ret);
}
if (pStmt->multiTbInsert) { if (pStmt->multiTbInsert) {
STMT_RET(TSDB_CODE_SUCCESS); STMT_RET(TSDB_CODE_SUCCESS);
} }
......
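For reference: stmtValidateValuesFields above makes a single pass over the prepared statement and, once the VALUES keyword has been seen, only tolerates '?', '(' and ')' tokens. Two sample statements illustrating the intent (illustration only; single-column to keep the token stream trivial):

/* accepted: only '?' appears inside VALUES(...)   -> TSDB_CODE_SUCCESS                 */
const char *ok  = "insert into tb values (?)";
/* rejected: a literal appears inside VALUES(...)  -> "only ? allowed in values"        */
const char *bad = "insert into tb values (1)";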
This diff has been collapsed.
...@@ -506,7 +506,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) { ...@@ -506,7 +506,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
} }
} }
if (pRes->code == TSDB_CODE_SUCCESS && tscProcessMsgRsp[pCmd->command]) { if (pRes->code == TSDB_CODE_SUCCESS && pCmd->command < TSDB_SQL_MAX && tscProcessMsgRsp[pCmd->command]) {
rpcMsg->code = (*tscProcessMsgRsp[pCmd->command])(pSql); rpcMsg->code = (*tscProcessMsgRsp[pCmd->command])(pSql);
} }
...@@ -832,7 +832,7 @@ static int32_t serializeSqlExpr(SSqlExpr* pExpr, STableMetaInfo* pTableMetaInfo, ...@@ -832,7 +832,7 @@ static int32_t serializeSqlExpr(SSqlExpr* pExpr, STableMetaInfo* pTableMetaInfo,
return TSDB_CODE_TSC_INVALID_TABLE_NAME; return TSDB_CODE_TSC_INVALID_TABLE_NAME;
} }
if (validateColumn && !tscValidateColumnId(pTableMetaInfo, pExpr->colInfo.colId, pExpr->numOfParams)) { if (validateColumn && !tscValidateColumnId(pTableMetaInfo, pExpr->colInfo.colId)) {
tscError("0x%"PRIx64" table schema is not matched with parsed sql", id); tscError("0x%"PRIx64" table schema is not matched with parsed sql", id);
return TSDB_CODE_TSC_INVALID_OPERATION; return TSDB_CODE_TSC_INVALID_OPERATION;
} }
...@@ -906,6 +906,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { ...@@ -906,6 +906,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SArray* queryOperator = createExecOperatorPlan(&query); SArray* queryOperator = createExecOperatorPlan(&query);
SQueryTableMsg *pQueryMsg = (SQueryTableMsg *)pCmd->payload; SQueryTableMsg *pQueryMsg = (SQueryTableMsg *)pCmd->payload;
tstrncpy(pQueryMsg->version, version, tListLen(pQueryMsg->version)); tstrncpy(pQueryMsg->version, version, tListLen(pQueryMsg->version));
int32_t numOfTags = query.numOfTags; int32_t numOfTags = query.numOfTags;
...@@ -1146,6 +1147,23 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { ...@@ -1146,6 +1147,23 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
memcpy(pMsg, pSql->sqlstr, sqlLen); memcpy(pMsg, pSql->sqlstr, sqlLen);
pMsg += sqlLen; pMsg += sqlLen;
/*
//MSG EXTEND DEMO
pQueryMsg->extend = 1;
STLV *tlv = (STLV *)pMsg;
tlv->type = htons(TLV_TYPE_DUMMY);
tlv->len = htonl(sizeof(int16_t));
*(int16_t *)tlv->value = htons(12345);
pMsg += sizeof(*tlv) + ntohl(tlv->len);
tlv = (STLV *)pMsg;
tlv->len = 0;
pMsg += sizeof(*tlv);
*/
int32_t msgLen = (int32_t)(pMsg - pCmd->payload); int32_t msgLen = (int32_t)(pMsg - pCmd->payload);
tscDebug("0x%"PRIx64" msg built success, len:%d bytes", pSql->self, msgLen); tscDebug("0x%"PRIx64" msg built success, len:%d bytes", pSql->self, msgLen);
...@@ -1492,7 +1510,8 @@ int tscEstimateCreateTableMsgLength(SSqlObj *pSql, SSqlInfo *pInfo) { ...@@ -1492,7 +1510,8 @@ int tscEstimateCreateTableMsgLength(SSqlObj *pSql, SSqlInfo *pInfo) {
SCreateTableSql *pCreateTableInfo = pInfo->pCreateTableInfo; SCreateTableSql *pCreateTableInfo = pInfo->pCreateTableInfo;
if (pCreateTableInfo->type == TSQL_CREATE_TABLE_FROM_STABLE) { if (pCreateTableInfo->type == TSQL_CREATE_TABLE_FROM_STABLE) {
int32_t numOfTables = (int32_t)taosArrayGetSize(pInfo->pCreateTableInfo->childTableInfo); int32_t numOfTables = (int32_t)taosArrayGetSize(pInfo->pCreateTableInfo->childTableInfo);
size += numOfTables * (sizeof(SCreateTableMsg) + TSDB_MAX_TAGS_LEN); size += numOfTables * (sizeof(SCreateTableMsg) +
((TSDB_MAX_TAGS_LEN > TSDB_MAX_JSON_TAGS_LEN)?TSDB_MAX_TAGS_LEN:TSDB_MAX_JSON_TAGS_LEN));
} else { } else {
size += sizeof(SSchema) * (pCmd->numOfCols + pCmd->count); size += sizeof(SSchema) * (pCmd->numOfCols + pCmd->count);
} }
...@@ -1614,9 +1633,6 @@ int tscEstimateAlterTableMsgLength(SSqlCmd *pCmd) { ...@@ -1614,9 +1633,6 @@ int tscEstimateAlterTableMsgLength(SSqlCmd *pCmd) {
} }
int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) { int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
char *pMsg;
int msgLen = 0;
SSqlCmd *pCmd = &pSql->cmd; SSqlCmd *pCmd = &pSql->cmd;
SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd); SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
...@@ -1645,14 +1661,7 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) { ...@@ -1645,14 +1661,7 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pSchema++; pSchema++;
} }
pMsg = (char *)pSchema; int msgLen = sizeof(SAlterTableMsg) + sizeof(SSchema) * tscNumOfFields(pQueryInfo);
pAlterTableMsg->tagValLen = htonl(pAlterInfo->tagData.dataLen);
if (pAlterInfo->tagData.dataLen > 0) {
memcpy(pMsg, pAlterInfo->tagData.data, pAlterInfo->tagData.dataLen);
}
pMsg += pAlterInfo->tagData.dataLen;
msgLen = (int32_t)(pMsg - (char*)pAlterTableMsg);
pCmd->payloadLen = msgLen; pCmd->payloadLen = msgLen;
pCmd->msgType = TSDB_MSG_TYPE_CM_ALTER_TABLE; pCmd->msgType = TSDB_MSG_TYPE_CM_ALTER_TABLE;
...@@ -1854,7 +1863,9 @@ int tscProcessRetrieveGlobalMergeRsp(SSqlObj *pSql) { ...@@ -1854,7 +1863,9 @@ int tscProcessRetrieveGlobalMergeRsp(SSqlObj *pSql) {
uint64_t localQueryId = pSql->self; uint64_t localQueryId = pSql->self;
qTableQuery(pQueryInfo->pQInfo, &localQueryId); qTableQuery(pQueryInfo->pQInfo, &localQueryId);
convertQueryResult(pRes, pQueryInfo, pSql->self, true); bool convertJson = true;
if (pQueryInfo->isStddev == true) convertJson = false;
convertQueryResult(pRes, pQueryInfo, pSql->self, true, convertJson);
code = pRes->code; code = pRes->code;
if (pRes->code == TSDB_CODE_SUCCESS) { if (pRes->code == TSDB_CODE_SUCCESS) {
...@@ -2813,7 +2824,11 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn ...@@ -2813,7 +2824,11 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn
tscAddQueryInfo(&pNew->cmd); tscAddQueryInfo(&pNew->cmd);
SQueryInfo *pNewQueryInfo = tscGetQueryInfoS(&pNew->cmd); SQueryInfo *pNewQueryInfo = tscGetQueryInfoS(&pNew->cmd);
if (TSDB_CODE_SUCCESS != tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE + pSql->cmd.payloadLen)) { int payLoadLen = TSDB_DEFAULT_PAYLOAD_SIZE + pSql->cmd.payloadLen;
if (autocreate && pSql->cmd.insertParam.tagData.dataLen != 0) {
payLoadLen += pSql->cmd.insertParam.tagData.dataLen;
}
if (TSDB_CODE_SUCCESS != tscAllocPayload(&pNew->cmd, payLoadLen)) {
tscError("0x%"PRIx64" malloc failed for payload to get table meta", pSql->self); tscError("0x%"PRIx64" malloc failed for payload to get table meta", pSql->self);
tscFreeSqlObj(pNew); tscFreeSqlObj(pNew);
......
...@@ -445,7 +445,7 @@ TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) { ...@@ -445,7 +445,7 @@ TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) {
// revise the length for binary and nchar fields // revise the length for binary and nchar fields
if (f[j].type == TSDB_DATA_TYPE_BINARY) { if (f[j].type == TSDB_DATA_TYPE_BINARY) {
f[j].bytes -= VARSTR_HEADER_SIZE; f[j].bytes -= VARSTR_HEADER_SIZE;
} else if (f[j].type == TSDB_DATA_TYPE_NCHAR) { } else if (f[j].type == TSDB_DATA_TYPE_NCHAR || f[j].type == TSDB_DATA_TYPE_JSON) {
f[j].bytes = (f[j].bytes - VARSTR_HEADER_SIZE)/TSDB_NCHAR_SIZE; f[j].bytes = (f[j].bytes - VARSTR_HEADER_SIZE)/TSDB_NCHAR_SIZE;
} }
......
...@@ -227,6 +227,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) { ...@@ -227,6 +227,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) {
if (skipped) { if (skipped) {
slot = 0; slot = 0;
stackidx = 0; stackidx = 0;
tVariantDestroy(&tag);
continue; continue;
} }
...@@ -334,6 +335,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) { ...@@ -334,6 +335,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) {
} }
if (mergeDone) { if (mergeDone) {
tVariantDestroy(&tag);
break; break;
} }
...@@ -341,6 +343,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) { ...@@ -341,6 +343,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) {
stackidx = 0; stackidx = 0;
skipRemainValue(mainCtx->p->pTSBuf, &tag); skipRemainValue(mainCtx->p->pTSBuf, &tag);
tVariantDestroy(&tag);
} }
stackidx = 0; stackidx = 0;
...@@ -633,16 +636,21 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) { ...@@ -633,16 +636,21 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
// set the join condition tag column info, todo extract method // set the join condition tag column info, todo extract method
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
assert(pQueryInfo->tagCond.joinInfo.hasJoin); assert(pQueryInfo->tagCond.joinInfo.hasJoin);
pExpr->base.numOfParams = 0; // the value is 0 by default. just make sure.
// add json tag key, if there is no json tag key, just hold place.
tVariantCreateFromBinary(&(pExpr->base.param[pExpr->base.numOfParams]), pQueryInfo->tagCond.joinInfo.joinTables[0]->tagJsonKeyName,
strlen(pQueryInfo->tagCond.joinInfo.joinTables[0]->tagJsonKeyName), TSDB_DATA_TYPE_BINARY);
pExpr->base.numOfParams++;
int16_t colId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->id.uid); int16_t colId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->id.uid);
// set the tag column id for executor to extract correct tag value // set the tag column id for executor to extract correct tag value
tVariant* pVariant = &pExpr->base.param[0]; tVariant* pVariant = &pExpr->base.param[pExpr->base.numOfParams];
pVariant->i64 = colId; pVariant->i64 = colId;
pVariant->nType = TSDB_DATA_TYPE_BIGINT; pVariant->nType = TSDB_DATA_TYPE_BIGINT;
pVariant->nLen = sizeof(int64_t); pVariant->nLen = sizeof(int64_t);
pExpr->base.numOfParams = 1; pExpr->base.numOfParams++;
} }
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
...@@ -729,8 +737,15 @@ int32_t tagValCompar(const void* p1, const void* p2) { ...@@ -729,8 +737,15 @@ int32_t tagValCompar(const void* p1, const void* p2) {
const STidTags* t1 = (const STidTags*) varDataVal(p1); const STidTags* t1 = (const STidTags*) varDataVal(p1);
const STidTags* t2 = (const STidTags*) varDataVal(p2); const STidTags* t2 = (const STidTags*) varDataVal(p2);
__compar_fn_t func = getComparFunc(t1->padding, 0); if (t1->padding == TSDB_DATA_TYPE_JSON){
bool canReturn = true;
int32_t result = jsonCompareUnit(t1->tag, t2->tag, &canReturn);
if(canReturn) return result;
__compar_fn_t func = getComparFunc(t1->tag[0], 0);
return func(t1->tag + CHAR_BYTES, t2->tag + CHAR_BYTES);
}
__compar_fn_t func = getComparFunc(t1->padding, 0);
return func(t1->tag, t2->tag); return func(t1->tag, t2->tag);
} }
...@@ -821,16 +836,21 @@ static void issueTsCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj* ...@@ -821,16 +836,21 @@ static void issueTsCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj*
SSchema colSchema = {.type = TSDB_DATA_TYPE_BINARY, .bytes = 1}; SSchema colSchema = {.type = TSDB_DATA_TYPE_BINARY, .bytes = 1};
SColumnIndex index = {0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; SColumnIndex index = {0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TS_COMP, &index, &colSchema, TSDB_COL_NORMAL, getNewResColId(pCmd)); SExprInfo *pExpr = tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TS_COMP, &index, &colSchema, TSDB_COL_NORMAL, getNewResColId(pCmd));
// set the tags value for ts_comp function // set the tags value for ts_comp function
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
SExprInfo *pExpr = tscExprGet(pQueryInfo, 0); pExpr->base.numOfParams = 0; // the value is 0 by default. just make sure.
int16_t tagColId = tscGetJoinTagColIdByUid(&pSupporter->tagCond, pTableMetaInfo->pTableMeta->id.uid); // add json tag key, if there is no json tag key, just hold place.
pExpr->base.param[0].i64 = tagColId; tVariantCreateFromBinary(&(pExpr->base.param[pExpr->base.numOfParams]), pSupporter->tagCond.joinInfo.joinTables[0]->tagJsonKeyName,
pExpr->base.param[0].nLen = sizeof(int64_t); strlen(pSupporter->tagCond.joinInfo.joinTables[0]->tagJsonKeyName), TSDB_DATA_TYPE_BINARY);
pExpr->base.param[0].nType = TSDB_DATA_TYPE_BIGINT; pExpr->base.numOfParams++;
pExpr->base.numOfParams = 1;
int16_t tagColId = tscGetJoinTagColIdByUid(&pSupporter->tagCond, pTableMetaInfo->pTableMeta->id.uid);
pExpr->base.param[pExpr->base.numOfParams].i64 = tagColId;
pExpr->base.param[pExpr->base.numOfParams].nLen = sizeof(int64_t);
pExpr->base.param[pExpr->base.numOfParams].nType = TSDB_DATA_TYPE_BIGINT;
pExpr->base.numOfParams++;
} }
// add the filter tag column // add the filter tag column
...@@ -2012,7 +2032,12 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter ...@@ -2012,7 +2032,12 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
// set get tags query type // set get tags query type
TSDB_QUERY_SET_TYPE(pNewQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY); TSDB_QUERY_SET_TYPE(pNewQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY);
tscAddFuncInSelectClause(pNewQueryInfo, 0, TSDB_FUNC_TID_TAG, &colIndex, &s1, TSDB_COL_TAG, getNewResColId(pCmd)); SExprInfo* pExpr = tscAddFuncInSelectClause(pNewQueryInfo, 0, TSDB_FUNC_TID_TAG, &colIndex, &s1, TSDB_COL_TAG, getNewResColId(pCmd));
if(strlen(pTagCond->joinInfo.joinTables[0]->tagJsonKeyName) > 0){
tVariantCreateFromBinary(&(pExpr->base.param[pExpr->base.numOfParams]), pTagCond->joinInfo.joinTables[0]->tagJsonKeyName,
strlen(pTagCond->joinInfo.joinTables[0]->tagJsonKeyName), TSDB_DATA_TYPE_BINARY);
pExpr->base.numOfParams++;
}
size_t numOfCols = taosArrayGetSize(pNewQueryInfo->colList); size_t numOfCols = taosArrayGetSize(pNewQueryInfo->colList);
tscDebug( tscDebug(
...@@ -2025,15 +2050,6 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter ...@@ -2025,15 +2050,6 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
SColumnIndex colIndex = {0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; SColumnIndex colIndex = {0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
tscAddFuncInSelectClause(pNewQueryInfo, 0, TSDB_FUNC_TS_COMP, &colIndex, &colSchema, TSDB_COL_NORMAL, getNewResColId(pCmd)); tscAddFuncInSelectClause(pNewQueryInfo, 0, TSDB_FUNC_TS_COMP, &colIndex, &colSchema, TSDB_COL_NORMAL, getNewResColId(pCmd));
// set the tags value for ts_comp function
SExprInfo *pExpr = tscExprGet(pNewQueryInfo, 0);
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
int16_t tagColId = tscGetJoinTagColIdByUid(&pSupporter->tagCond, pTableMetaInfo->pTableMeta->id.uid);
pExpr->base.param->i64 = tagColId;
pExpr->base.numOfParams = 1;
}
// add the filter tag column // add the filter tag column
if (pSupporter->colList != NULL) { if (pSupporter->colList != NULL) {
size_t s = taosArrayGetSize(pSupporter->colList); size_t s = taosArrayGetSize(pSupporter->colList);
...@@ -2427,6 +2443,7 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { ...@@ -2427,6 +2443,7 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pNewQueryInfo, 0); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pNewQueryInfo, 0);
tscInitQueryInfo(pNewQueryInfo); tscInitQueryInfo(pNewQueryInfo);
pNewQueryInfo->isStddev = true; // for json tag
// add the group cond // add the group cond
pNewQueryInfo->groupbyExpr = pQueryInfo->groupbyExpr; pNewQueryInfo->groupbyExpr = pQueryInfo->groupbyExpr;
...@@ -2490,6 +2507,10 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { ...@@ -2490,6 +2507,10 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
} }
SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_TAG, &colIndex, schema, TSDB_COL_TAG, getNewResColId(pCmd)); SExprInfo* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_TAG, &colIndex, schema, TSDB_COL_TAG, getNewResColId(pCmd));
if (schema->type == TSDB_DATA_TYPE_JSON){
p->base.numOfParams = pExpr->base.numOfParams;
tVariantAssign(&p->base.param[0], &pExpr->base.param[0]);
}
p->base.resColId = pExpr->base.resColId; p->base.resColId = pExpr->base.resColId;
} else if (pExpr->base.functionId == TSDB_FUNC_PRJ) { } else if (pExpr->base.functionId == TSDB_FUNC_PRJ) {
int32_t num = (int32_t) taosArrayGetSize(pNewQueryInfo->groupbyExpr.columnInfo); int32_t num = (int32_t) taosArrayGetSize(pNewQueryInfo->groupbyExpr.columnInfo);
...@@ -3600,7 +3621,7 @@ static void doBuildResFromSubqueries(SSqlObj* pSql) { ...@@ -3600,7 +3621,7 @@ static void doBuildResFromSubqueries(SSqlObj* pSql) {
finalRowSize += pField->bytes; finalRowSize += pField->bytes;
} }
doArithmeticCalculate(pQueryInfo, pFilePage, rowSize, finalRowSize); doScalarExprCalculate(pQueryInfo, pFilePage, rowSize, finalRowSize);
pRes->data = pFilePage->data; pRes->data = pFilePage->data;
tscSetResRawPtr(pRes, pQueryInfo, pRes->dataConverted); tscSetResRawPtr(pRes, pQueryInfo, pRes->dataConverted);
...@@ -3639,8 +3660,8 @@ void tscBuildResFromSubqueries(SSqlObj *pSql) { ...@@ -3639,8 +3660,8 @@ void tscBuildResFromSubqueries(SSqlObj *pSql) {
} }
} }
char *getArithmeticInputSrc(void *param, const char *name, int32_t colId) { char * getScalarExprInputSrc(void *param, const char *name, int32_t colId) {
SArithmeticSupport *pSupport = (SArithmeticSupport *) param; SScalarExprSupport*pSupport = (SScalarExprSupport*) param;
int32_t index = -1; int32_t index = -1;
SExprInfo* pExpr = NULL; SExprInfo* pExpr = NULL;
...@@ -3681,7 +3702,9 @@ TAOS_ROW doSetResultRowData(SSqlObj *pSql) { ...@@ -3681,7 +3702,9 @@ TAOS_ROW doSetResultRowData(SSqlObj *pSql) {
int32_t type = pInfo->field.type; int32_t type = pInfo->field.type;
int32_t bytes = pInfo->field.bytes; int32_t bytes = pInfo->field.bytes;
if (type != TSDB_DATA_TYPE_BINARY && type != TSDB_DATA_TYPE_NCHAR) { if (pQueryInfo->isStddev && type == TSDB_DATA_TYPE_JSON){ // for json tag compare in the second round of stddev
pRes->tsrow[j] = pRes->urow[i];
}else if (!IS_VAR_DATA_TYPE(type) && type != TSDB_DATA_TYPE_JSON) {
pRes->tsrow[j] = isNull(pRes->urow[i], type) ? NULL : pRes->urow[i]; pRes->tsrow[j] = isNull(pRes->urow[i], type) ? NULL : pRes->urow[i];
} else { } else {
pRes->tsrow[j] = isNull(pRes->urow[i], type) ? NULL : varDataVal(pRes->urow[i]); pRes->tsrow[j] = isNull(pRes->urow[i], type) ? NULL : varDataVal(pRes->urow[i]);
......
This diff has been collapsed.
...@@ -25,6 +25,7 @@ typedef void (*_arithmetic_operator_fn_t)(void *left, int32_t numLeft, int32_t l ...@@ -25,6 +25,7 @@ typedef void (*_arithmetic_operator_fn_t)(void *left, int32_t numLeft, int32_t l
_arithmetic_operator_fn_t getArithmeticOperatorFn(int32_t arithmeticOptr); _arithmetic_operator_fn_t getArithmeticOperatorFn(int32_t arithmeticOptr);
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif
......
...@@ -448,13 +448,14 @@ typedef struct { ...@@ -448,13 +448,14 @@ typedef struct {
#define kvRowSetNCols(r, n) kvRowNCols(r) = (n) #define kvRowSetNCols(r, n) kvRowNCols(r) = (n)
#define kvRowColIdx(r) (SColIdx *)POINTER_SHIFT(r, TD_KV_ROW_HEAD_SIZE) #define kvRowColIdx(r) (SColIdx *)POINTER_SHIFT(r, TD_KV_ROW_HEAD_SIZE)
#define kvRowValues(r) POINTER_SHIFT(r, TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * kvRowNCols(r)) #define kvRowValues(r) POINTER_SHIFT(r, TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * kvRowNCols(r))
#define kvRowKeys(r) POINTER_SHIFT(r, *(uint16_t *)POINTER_SHIFT(r, TD_KV_ROW_HEAD_SIZE + sizeof(int16_t)))
#define kvRowCpy(dst, r) memcpy((dst), (r), kvRowLen(r)) #define kvRowCpy(dst, r) memcpy((dst), (r), kvRowLen(r))
#define kvRowColVal(r, colIdx) POINTER_SHIFT(kvRowValues(r), (colIdx)->offset) #define kvRowColVal(r, colIdx) POINTER_SHIFT(kvRowValues(r), (colIdx)->offset)
#define kvRowColIdxAt(r, i) (kvRowColIdx(r) + (i)) #define kvRowColIdxAt(r, i) (kvRowColIdx(r) + (i))
#define kvRowFree(r) tfree(r) #define kvRowFree(r) tfree(r)
#define kvRowEnd(r) POINTER_SHIFT(r, kvRowLen(r)) #define kvRowEnd(r) POINTER_SHIFT(r, kvRowLen(r))
#define kvRowValLen(r) (kvRowLen(r) - TD_KV_ROW_HEAD_SIZE - sizeof(SColIdx) * kvRowNCols(r)) #define kvRowValLen(r) (kvRowLen(r) - TD_KV_ROW_HEAD_SIZE - sizeof(SColIdx) * kvRowNCols(r))
#define kvRowTKey(r) (*(TKEY *)(kvRowValues(r))) #define kvRowTKey(r) (*(TKEY *)(kvRowKeys(r)))
#define kvRowKey(r) tdGetKey(kvRowTKey(r)) #define kvRowKey(r) tdGetKey(kvRowTKey(r))
#define kvRowDeleted(r) TKEY_IS_DELETED(kvRowTKey(r)) #define kvRowDeleted(r) TKEY_IS_DELETED(kvRowTKey(r))
...@@ -546,7 +547,7 @@ void tdDestroyKVRowBuilder(SKVRowBuilder *pBuilder); ...@@ -546,7 +547,7 @@ void tdDestroyKVRowBuilder(SKVRowBuilder *pBuilder);
void tdResetKVRowBuilder(SKVRowBuilder *pBuilder); void tdResetKVRowBuilder(SKVRowBuilder *pBuilder);
SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder); SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder);
static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId, int8_t type, void *value) { static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId, int8_t type, void *value, bool isJumpJsonVType) {
if (pBuilder->nCols >= pBuilder->tCols) { if (pBuilder->nCols >= pBuilder->tCols) {
pBuilder->tCols *= 2; pBuilder->tCols *= 2;
SColIdx* pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols); SColIdx* pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols);
...@@ -559,9 +560,14 @@ static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId, ...@@ -559,9 +560,14 @@ static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId,
pBuilder->nCols++; pBuilder->nCols++;
int tlen = IS_VAR_DATA_TYPE(type) ? varDataTLen(value) : TYPE_BYTES[type]; char* jumpType = (char*)value;
if(isJumpJsonVType) jumpType += CHAR_BYTES;
int tlen = IS_VAR_DATA_TYPE(type) ? varDataTLen(jumpType) : TYPE_BYTES[type];
if(isJumpJsonVType) tlen += CHAR_BYTES; // add type size
if (tlen > pBuilder->alloc - pBuilder->size) { if (tlen > pBuilder->alloc - pBuilder->size) {
while (tlen > pBuilder->alloc - pBuilder->size) { while (tlen > pBuilder->alloc - pBuilder->size) {
assert(pBuilder->alloc > 0);
pBuilder->alloc *= 2; pBuilder->alloc *= 2;
} }
void* buf = realloc(pBuilder->buf, pBuilder->alloc); void* buf = realloc(pBuilder->buf, pBuilder->alloc);
...@@ -608,22 +614,17 @@ typedef void *SMemRow; ...@@ -608,22 +614,17 @@ typedef void *SMemRow;
#define SMEM_ROW_DATA 0x0U // SDataRow #define SMEM_ROW_DATA 0x0U // SDataRow
#define SMEM_ROW_KV 0x01U // SKVRow #define SMEM_ROW_KV 0x01U // SKVRow
#define SMEM_ROW_CONVERT 0x80U // SMemRow convert flag
#define KVRatioKV (0.2f) // all bool
#define KVRatioPredict (0.4f)
#define KVRatioData (0.75f) // all bigint
#define KVRatioConvert (0.9f) #define KVRatioConvert (0.9f)
#define memRowType(r) ((*(uint8_t *)(r)) & 0x01) #define memRowType(r) ((*(uint8_t *)(r)) & 0x01)
#define memRowSetType(r, t) ((*(uint8_t *)(r)) = (t)) // set the total byte in case of dirty memory #define memRowSetType(r, t) ((*(uint8_t *)(r)) = (t)) // set the total byte in case of dirty memory
#define memRowSetConvert(r) ((*(uint8_t *)(r)) = (((*(uint8_t *)(r)) & 0x7F) | SMEM_ROW_CONVERT)) // highest bit
#define isDataRowT(t) (SMEM_ROW_DATA == (((uint8_t)(t)) & 0x01)) #define isDataRowT(t) (SMEM_ROW_DATA == (((uint8_t)(t)) & 0x01))
#define isDataRow(r) (SMEM_ROW_DATA == memRowType(r)) #define isDataRow(r) (SMEM_ROW_DATA == memRowType(r))
#define isKvRowT(t) (SMEM_ROW_KV == (((uint8_t)(t)) & 0x01)) #define isKvRowT(t) (SMEM_ROW_KV == (((uint8_t)(t)) & 0x01))
#define isKvRow(r) (SMEM_ROW_KV == memRowType(r)) #define isKvRow(r) (SMEM_ROW_KV == memRowType(r))
#define isNeedConvertRow(r) (((*(uint8_t *)(r)) & 0x80) == SMEM_ROW_CONVERT) #define isUtilizeKVRow(k, d) ((k) < ((d)*KVRatioConvert))
#define memRowDataBody(r) POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE) // section after flag #define memRowDataBody(r) POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE) // section after flag
#define memRowKvBody(r) \ #define memRowKvBody(r) \
...@@ -652,7 +653,7 @@ static FORCE_INLINE char *memRowEnd(SMemRow row) { ...@@ -652,7 +653,7 @@ static FORCE_INLINE char *memRowEnd(SMemRow row) {
#define memRowKvVersion(r) (*(int16_t *)POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE)) #define memRowKvVersion(r) (*(int16_t *)POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE))
#define memRowVersion(r) (isDataRow(r) ? memRowDataVersion(r) : memRowKvVersion(r)) // schema version #define memRowVersion(r) (isDataRow(r) ? memRowDataVersion(r) : memRowKvVersion(r)) // schema version
#define memRowSetKvVersion(r, v) (memRowKvVersion(r) = (v)) #define memRowSetKvVersion(r, v) (memRowKvVersion(r) = (v))
#define memRowTuple(r) (isDataRow(r) ? dataRowTuple(memRowDataBody(r)) : kvRowValues(memRowKvBody(r))) #define memRowKeys(r) (isDataRow(r) ? dataRowTuple(memRowDataBody(r)) : kvRowKeys(memRowKvBody(r)))
#define memRowTKey(r) (isDataRow(r) ? dataRowTKey(memRowDataBody(r)) : kvRowTKey(memRowKvBody(r))) #define memRowTKey(r) (isDataRow(r) ? dataRowTKey(memRowDataBody(r)) : kvRowTKey(memRowKvBody(r)))
#define memRowKey(r) (isDataRow(r) ? dataRowKey(memRowDataBody(r)) : kvRowKey(memRowKvBody(r))) #define memRowKey(r) (isDataRow(r) ? dataRowKey(memRowDataBody(r)) : kvRowKey(memRowKvBody(r)))
......
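For reference: with the KVRatio* constants removed, the row-format choice made in initMemRowBuilder earlier in this commit reduces to the single isUtilizeKVRow() comparison defined above. A minimal sketch of that decision (assuming the lengths are computed from SParsedDataColInfo as in initMemRowBuilder):

/* Sketch only: pick the in-memory row format the way initMemRowBuilder now does. */
uint32_t dataLen = TD_MEM_ROW_DATA_HEAD_SIZE + pColInfo->allNullLen;               /* dense tuple layout    */
uint32_t kvLen   = TD_MEM_ROW_KV_HEAD_SIZE
                   + pColInfo->numOfBound * sizeof(SColIdx)
                   + pColInfo->boundNullLen;                                        /* sparse KV layout      */
uint8_t  rowType = isUtilizeKVRow(kvLen, dataLen) ? SMEM_ROW_KV : SMEM_ROW_DATA;    /* kvLen < 0.9 * dataLen */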
...@@ -41,6 +41,55 @@ struct SSchema; ...@@ -41,6 +41,55 @@ struct SSchema;
#define QUERY_COND_REL_PREFIX_MATCH_LEN 6 #define QUERY_COND_REL_PREFIX_MATCH_LEN 6
#define QUERY_COND_REL_PREFIX_NMATCH_LEN 7 #define QUERY_COND_REL_PREFIX_NMATCH_LEN 7
#define TSDB_FUNC_FLAG_SCALAR 0x4000
#define TSDB_FUNC_IS_SCALAR(id) ((((id) > 0)) && (((id) & TSDB_FUNC_FLAG_SCALAR) != 0))
#define TSDB_FUNC_SCALAR_INDEX(id) ((id) & ~TSDB_FUNC_FLAG_SCALAR)
///////////////////////////////////////////
// SCALAR FUNCTIONS
#define TSDB_FUNC_SCALAR_POW (TSDB_FUNC_FLAG_SCALAR | 0x0000)
#define TSDB_FUNC_SCALAR_LOG (TSDB_FUNC_FLAG_SCALAR | 0x0001)
#define TSDB_FUNC_SCALAR_ABS (TSDB_FUNC_FLAG_SCALAR | 0x0002)
#define TSDB_FUNC_SCALAR_ACOS (TSDB_FUNC_FLAG_SCALAR | 0x0003)
#define TSDB_FUNC_SCALAR_ASIN (TSDB_FUNC_FLAG_SCALAR | 0x0004)
#define TSDB_FUNC_SCALAR_ATAN (TSDB_FUNC_FLAG_SCALAR | 0x0005)
#define TSDB_FUNC_SCALAR_COS (TSDB_FUNC_FLAG_SCALAR | 0x0006)
#define TSDB_FUNC_SCALAR_SIN (TSDB_FUNC_FLAG_SCALAR | 0x0007)
#define TSDB_FUNC_SCALAR_TAN (TSDB_FUNC_FLAG_SCALAR | 0x0008)
#define TSDB_FUNC_SCALAR_SQRT (TSDB_FUNC_FLAG_SCALAR | 0x0009)
#define TSDB_FUNC_SCALAR_CEIL (TSDB_FUNC_FLAG_SCALAR | 0x000A)
#define TSDB_FUNC_SCALAR_FLOOR (TSDB_FUNC_FLAG_SCALAR | 0x000B)
#define TSDB_FUNC_SCALAR_ROUND (TSDB_FUNC_FLAG_SCALAR | 0x000C)
#define TSDB_FUNC_SCALAR_CONCAT (TSDB_FUNC_FLAG_SCALAR | 0x000D)
#define TSDB_FUNC_SCALAR_LENGTH (TSDB_FUNC_FLAG_SCALAR | 0x000E)
#define TSDB_FUNC_SCALAR_CONCAT_WS (TSDB_FUNC_FLAG_SCALAR | 0x000F)
#define TSDB_FUNC_SCALAR_CHAR_LENGTH (TSDB_FUNC_FLAG_SCALAR | 0x0010)
#define TSDB_FUNC_SCALAR_CAST (TSDB_FUNC_FLAG_SCALAR | 0x0011)
#define TSDB_FUNC_SCALAR_MAX_NUM 18
#define TSDB_FUNC_SCALAR_NAME_MAX_LEN 16
typedef struct {
int16_t type;
int16_t bytes;
int16_t numOfRows;
char* data;
} tExprOperandInfo;
typedef void (*_expr_scalar_function_t)(int16_t functionId, tExprOperandInfo* pInputs, int32_t numInputs, tExprOperandInfo* pOutput, int32_t order);
_expr_scalar_function_t getExprScalarFunction(uint16_t scalar);
typedef struct tScalarFunctionInfo{
int16_t functionId; // scalar function id & ~TSDB_FUNC_FLAG_SCALAR == index
char name[TSDB_FUNC_SCALAR_NAME_MAX_LEN];
_expr_scalar_function_t scalarFunc;
} tScalarFunctionInfo;
/* global scalar sql functions array */
extern struct tScalarFunctionInfo aScalarFunctions[TSDB_FUNC_SCALAR_MAX_NUM];
typedef bool (*__result_filter_fn_t)(const void *, void *); typedef bool (*__result_filter_fn_t)(const void *, void *);
typedef void (*__do_filter_suppl_fn_t)(void *, void *); typedef void (*__do_filter_suppl_fn_t)(void *, void *);
...@@ -49,6 +98,8 @@ enum { ...@@ -49,6 +98,8 @@ enum {
TSQL_NODE_EXPR = 0x1, TSQL_NODE_EXPR = 0x1,
TSQL_NODE_COL = 0x2, TSQL_NODE_COL = 0x2,
TSQL_NODE_VALUE = 0x4, TSQL_NODE_VALUE = 0x4,
TSQL_NODE_FUNC = 0x8,
TSQL_NODE_TYPE = 0x10
}; };
/** /**
...@@ -74,8 +125,19 @@ typedef struct tExprNode { ...@@ -74,8 +125,19 @@ typedef struct tExprNode {
} _node; } _node;
struct SSchema *pSchema; struct SSchema *pSchema;
tVariant *pVal; tVariant *pVal;
struct {
int16_t functionId;
int32_t numChildren;
struct tExprNode **pChildren;
} _func;
TAOS_FIELD *pType;
}; };
int16_t resultType;
int16_t resultBytes;
} tExprNode; } tExprNode;
typedef struct SExprTraverseSupp { typedef struct SExprTraverseSupp {
...@@ -86,6 +148,8 @@ typedef struct SExprTraverseSupp { ...@@ -86,6 +148,8 @@ typedef struct SExprTraverseSupp {
void tExprTreeDestroy(tExprNode *pNode, void (*fp)(void *)); void tExprTreeDestroy(tExprNode *pNode, void (*fp)(void *));
int32_t exprTreeValidateTree(char* msgbuf, tExprNode *pExpr);
void exprTreeToBinary(SBufferWriter* bw, tExprNode* pExprTree); void exprTreeToBinary(SBufferWriter* bw, tExprNode* pExprTree);
tExprNode* exprTreeFromBinary(const void* data, size_t size); tExprNode* exprTreeFromBinary(const void* data, size_t size);
tExprNode* exprdup(tExprNode* pTree); tExprNode* exprdup(tExprNode* pTree);
...@@ -94,8 +158,8 @@ void exprTreeToBinary(SBufferWriter* bw, tExprNode* pExprTree); ...@@ -94,8 +158,8 @@ void exprTreeToBinary(SBufferWriter* bw, tExprNode* pExprTree);
bool exprTreeApplyFilter(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *param); bool exprTreeApplyFilter(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *param);
void arithmeticTreeTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput, void *param, int32_t order, void exprTreeNodeTraverse(tExprNode *pExpr, int32_t numOfRows, tExprOperandInfo *output, void *param, int32_t order,
char *(*cb)(void *, const char*, int32_t)); char *(*getSourceDataBlock)(void *, const char*, int32_t));
void buildFilterSetFromBinary(void **q, const char *buf, int32_t len); void buildFilterSetFromBinary(void **q, const char *buf, int32_t len);
......
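For reference: the scalar-function ids introduced above are plain bit-tagged integers, so dispatch only needs the two macros. A short sketch of encoding and decoding (illustration only; it assumes the texpr.h declarations from this commit):

/* Sketch only: TSDB_FUNC_FLAG_SCALAR marks an id as scalar; the low bits index aScalarFunctions[]. */
int16_t id  = TSDB_FUNC_SCALAR_ABS;                        /* 0x4000 | 0x0002                       */
assert(TSDB_FUNC_IS_SCALAR(id));                           /* positive and flag bit set             */
int16_t idx = TSDB_FUNC_SCALAR_INDEX(id);                  /* 0x0002, the slot in aScalarFunctions  */
_expr_scalar_function_t fn = aScalarFunctions[idx].scalarFunc;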
...@@ -42,6 +42,7 @@ extern int8_t tsArbOnline; ...@@ -42,6 +42,7 @@ extern int8_t tsArbOnline;
extern int64_t tsArbOnlineTimestamp; extern int64_t tsArbOnlineTimestamp;
extern int32_t tsDnodeId; extern int32_t tsDnodeId;
extern int64_t tsDnodeStartTime; extern int64_t tsDnodeStartTime;
extern int8_t tsDnodeNopLoop;
// common // common
extern int tsRpcTimer; extern int tsRpcTimer;
......
...@@ -25,7 +25,7 @@ extern "C" { ...@@ -25,7 +25,7 @@ extern "C" {
// variant, each number/string/field_id has a corresponding struct during parsing sql // variant, each number/string/field_id has a corresponding struct during parsing sql
typedef struct tVariant { typedef struct tVariant {
uint32_t nType; int32_t nType; // change uint to int, because in tVariantCreate() pVar->nType = -1; // -1 means error type
int32_t nLen; // only used for string, for number, it is useless int32_t nLen; // only used for string, for number, it is useless
union { union {
int64_t i64; int64_t i64;
...@@ -39,7 +39,9 @@ typedef struct tVariant { ...@@ -39,7 +39,9 @@ typedef struct tVariant {
bool tVariantIsValid(tVariant *pVar); bool tVariantIsValid(tVariant *pVar);
void tVariantCreate(tVariant *pVar, SStrToken *token, bool needRmquoteEscape); void tVariantCreate(tVariant *pVar, SStrToken *token);
void tVariantCreateExt(tVariant *pVar, SStrToken *token, int32_t optrType, bool needRmquoteEscape);
void tVariantCreateFromBinary(tVariant *pVar, const char *pz, size_t len, uint32_t type); void tVariantCreateFromBinary(tVariant *pVar, const char *pz, size_t len, uint32_t type);
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
#include "tutil.h" #include "tutil.h"
#include "tarithoperator.h" #include "tarithoperator.h"
#include "tcompare.h" #include "tcompare.h"
#include "texpr.h"
//GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i])); //GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i]));
......
This diff has been collapsed.
...@@ -47,6 +47,7 @@ int64_t tsArbOnlineTimestamp = TSDB_ARB_DUMMY_TIME; ...@@ -47,6 +47,7 @@ int64_t tsArbOnlineTimestamp = TSDB_ARB_DUMMY_TIME;
char tsEmail[TSDB_FQDN_LEN] = {0}; char tsEmail[TSDB_FQDN_LEN] = {0};
int32_t tsDnodeId = 0; int32_t tsDnodeId = 0;
int64_t tsDnodeStartTime = 0; int64_t tsDnodeStartTime = 0;
int8_t tsDnodeNopLoop = 0;
// common // common
int32_t tsRpcTimer = 300; int32_t tsRpcTimer = 300;
...@@ -622,6 +623,16 @@ static void doInitGlobalConfig(void) { ...@@ -622,6 +623,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE; cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg); taosInitConfigOption(cfg);
cfg.option = "dnodeNopLoop";
cfg.ptr = &tsDnodeNopLoop;
cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
cfg.minValue = 0;
cfg.maxValue = 1;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "balance"; cfg.option = "balance";
cfg.ptr = &tsEnableBalance; cfg.ptr = &tsEnableBalance;
cfg.valType = TAOS_CFG_VTYPE_INT8; cfg.valType = TAOS_CFG_VTYPE_INT8;
...@@ -1332,7 +1343,7 @@ static void doInitGlobalConfig(void) { ...@@ -1332,7 +1343,7 @@ static void doInitGlobalConfig(void) {
cfg.option = "httpDbNameMandatory"; cfg.option = "httpDbNameMandatory";
cfg.ptr = &tsHttpDbNameMandatory; cfg.ptr = &tsHttpDbNameMandatory;
cfg.valType = TAOS_CFG_VTYPE_INT8; cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG; cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 0; cfg.minValue = 0;
cfg.maxValue = 1; cfg.maxValue = 1;
cfg.ptrLength = 0; cfg.ptrLength = 0;
......
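For reference: dnodeNopLoop is registered above as an ordinary int8 on/off switch (min 0, max 1, settable from the config file only). Presumably it is enabled the same way as the neighbouring boolean options, i.e. with a line in the dnode's taos.cfg:

# illustration only: turn on the new no-op-loop switch for this dnode
dnodeNopLoop            1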
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
#include "tname.h" #include "tname.h"
#include "ttoken.h" #include "ttoken.h"
#include "tvariant.h" #include "tvariant.h"
#include "tglobal.h"
#define VALIDNUMOFCOLS(x) ((x) >= TSDB_MIN_COLUMNS && (x) <= TSDB_MAX_COLUMNS) #define VALIDNUMOFCOLS(x) ((x) >= TSDB_MIN_COLUMNS && (x) <= TSDB_MAX_COLUMNS)
#define VALIDNUMOFTAGS(x) ((x) >= 0 && (x) <= TSDB_MAX_TAGS) #define VALIDNUMOFTAGS(x) ((x) >= 0 && (x) <= TSDB_MAX_TAGS)
...@@ -251,6 +252,9 @@ static bool doValidateSchema(SSchema* pSchema, int32_t numOfCols, int32_t maxLen ...@@ -251,6 +252,9 @@ static bool doValidateSchema(SSchema* pSchema, int32_t numOfCols, int32_t maxLen
int32_t rowLen = 0; int32_t rowLen = 0;
for (int32_t i = 0; i < numOfCols; ++i) { for (int32_t i = 0; i < numOfCols; ++i) {
if (pSchema[i].type == TSDB_DATA_TYPE_JSON && numOfCols != 1){
return false;
}
// 1. valid types // 1. valid types
if (!isValidDataType(pSchema[i].type)) { if (!isValidDataType(pSchema[i].type)) {
return false; return false;
...@@ -301,8 +305,12 @@ bool tIsValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTag ...@@ -301,8 +305,12 @@ bool tIsValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTag
if (!doValidateSchema(pSchema, numOfCols, TSDB_MAX_BYTES_PER_ROW)) { if (!doValidateSchema(pSchema, numOfCols, TSDB_MAX_BYTES_PER_ROW)) {
return false; return false;
} }
int32_t maxTagLen = TSDB_MAX_TAGS_LEN;
if (numOfTags == 1 && pSchema[numOfCols].type == TSDB_DATA_TYPE_JSON){
maxTagLen = TSDB_MAX_JSON_TAGS_LEN;
}
if (!doValidateSchema(&pSchema[numOfCols], numOfTags, TSDB_MAX_TAGS_LEN)) { if (!doValidateSchema(&pSchema[numOfCols], numOfTags, maxTagLen)) {
return false; return false;
} }
......
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
#include "ttokendef.h" #include "ttokendef.h"
#include "tscompression.h" #include "tscompression.h"
const int32_t TYPE_BYTES[15] = { const int32_t TYPE_BYTES[16] = {
-1, // TSDB_DATA_TYPE_NULL -1, // TSDB_DATA_TYPE_NULL
sizeof(int8_t), // TSDB_DATA_TYPE_BOOL sizeof(int8_t), // TSDB_DATA_TYPE_BOOL
sizeof(int8_t), // TSDB_DATA_TYPE_TINYINT sizeof(int8_t), // TSDB_DATA_TYPE_TINYINT
...@@ -34,6 +34,7 @@ const int32_t TYPE_BYTES[15] = { ...@@ -34,6 +34,7 @@ const int32_t TYPE_BYTES[15] = {
sizeof(uint16_t), // TSDB_DATA_TYPE_USMALLINT sizeof(uint16_t), // TSDB_DATA_TYPE_USMALLINT
sizeof(uint32_t), // TSDB_DATA_TYPE_UINT sizeof(uint32_t), // TSDB_DATA_TYPE_UINT
sizeof(uint64_t), // TSDB_DATA_TYPE_UBIGINT sizeof(uint64_t), // TSDB_DATA_TYPE_UBIGINT
sizeof(int8_t), // TSDB_DATA_TYPE_JSON
}; };
#define DO_STATICS(__sum, __min, __max, __minIndex, __maxIndex, _list, _index) \ #define DO_STATICS(__sum, __min, __max, __minIndex, __maxIndex, _list, _index) \
...@@ -367,8 +368,8 @@ static void getStatics_nchr(const void *pData, int32_t numOfRow, int64_t *min, i ...@@ -367,8 +368,8 @@ static void getStatics_nchr(const void *pData, int32_t numOfRow, int64_t *min, i
*maxIndex = 0; *maxIndex = 0;
} }
tDataTypeDescriptor tDataTypes[15] = { tDataTypeDescriptor tDataTypes[16] = {
{TSDB_DATA_TYPE_NULL, 6, 1, "NOTYPE", 0, 0, NULL, NULL, NULL}, {TSDB_DATA_TYPE_NULL, 6, 1, "NOTYPE", 0, 0, NULL, NULL, NULL},
{TSDB_DATA_TYPE_BOOL, 4, CHAR_BYTES, "BOOL", false, true, tsCompressBool, tsDecompressBool, getStatics_bool}, {TSDB_DATA_TYPE_BOOL, 4, CHAR_BYTES, "BOOL", false, true, tsCompressBool, tsDecompressBool, getStatics_bool},
{TSDB_DATA_TYPE_TINYINT, 7, CHAR_BYTES, "TINYINT", INT8_MIN, INT8_MAX, tsCompressTinyint, tsDecompressTinyint, getStatics_i8}, {TSDB_DATA_TYPE_TINYINT, 7, CHAR_BYTES, "TINYINT", INT8_MIN, INT8_MAX, tsCompressTinyint, tsDecompressTinyint, getStatics_i8},
{TSDB_DATA_TYPE_SMALLINT, 8, SHORT_BYTES, "SMALLINT", INT16_MIN, INT16_MAX, tsCompressSmallint, tsDecompressSmallint, getStatics_i16}, {TSDB_DATA_TYPE_SMALLINT, 8, SHORT_BYTES, "SMALLINT", INT16_MIN, INT16_MAX, tsCompressSmallint, tsDecompressSmallint, getStatics_i16},
...@@ -376,13 +377,14 @@ tDataTypeDescriptor tDataTypes[15] = { ...@@ -376,13 +377,14 @@ tDataTypeDescriptor tDataTypes[15] = {
{TSDB_DATA_TYPE_BIGINT, 6, LONG_BYTES, "BIGINT", INT64_MIN, INT64_MAX, tsCompressBigint, tsDecompressBigint, getStatics_i64}, {TSDB_DATA_TYPE_BIGINT, 6, LONG_BYTES, "BIGINT", INT64_MIN, INT64_MAX, tsCompressBigint, tsDecompressBigint, getStatics_i64},
{TSDB_DATA_TYPE_FLOAT, 5, FLOAT_BYTES, "FLOAT", 0, 0, tsCompressFloat, tsDecompressFloat, getStatics_f}, {TSDB_DATA_TYPE_FLOAT, 5, FLOAT_BYTES, "FLOAT", 0, 0, tsCompressFloat, tsDecompressFloat, getStatics_f},
{TSDB_DATA_TYPE_DOUBLE, 6, DOUBLE_BYTES, "DOUBLE", 0, 0, tsCompressDouble, tsDecompressDouble, getStatics_d}, {TSDB_DATA_TYPE_DOUBLE, 6, DOUBLE_BYTES, "DOUBLE", 0, 0, tsCompressDouble, tsDecompressDouble, getStatics_d},
{TSDB_DATA_TYPE_BINARY, 6, 0, "BINARY", 0, 0, tsCompressString, tsDecompressString, getStatics_bin}, {TSDB_DATA_TYPE_BINARY, 6, 0, "BINARY", 0, 0, tsCompressString, tsDecompressString, getStatics_bin},
{TSDB_DATA_TYPE_TIMESTAMP, 9, LONG_BYTES, "TIMESTAMP", INT64_MIN, INT64_MAX, tsCompressTimestamp, tsDecompressTimestamp, getStatics_i64}, {TSDB_DATA_TYPE_TIMESTAMP, 9, LONG_BYTES, "TIMESTAMP", INT64_MIN, INT64_MAX, tsCompressTimestamp, tsDecompressTimestamp, getStatics_i64},
{TSDB_DATA_TYPE_NCHAR, 5, 8, "NCHAR", 0, 0, tsCompressString, tsDecompressString, getStatics_nchr}, {TSDB_DATA_TYPE_NCHAR, 5, 8, "NCHAR", 0, 0, tsCompressString, tsDecompressString, getStatics_nchr},
{TSDB_DATA_TYPE_UTINYINT, 16, CHAR_BYTES, "TINYINT UNSIGNED", 0, UINT8_MAX, tsCompressTinyint, tsDecompressTinyint, getStatics_u8}, {TSDB_DATA_TYPE_UTINYINT, 16, CHAR_BYTES, "TINYINT UNSIGNED", 0, UINT8_MAX, tsCompressTinyint, tsDecompressTinyint, getStatics_u8},
{TSDB_DATA_TYPE_USMALLINT, 17, SHORT_BYTES, "SMALLINT UNSIGNED", 0, UINT16_MAX, tsCompressSmallint, tsDecompressSmallint, getStatics_u16}, {TSDB_DATA_TYPE_USMALLINT, 17, SHORT_BYTES, "SMALLINT UNSIGNED", 0, UINT16_MAX, tsCompressSmallint, tsDecompressSmallint, getStatics_u16},
{TSDB_DATA_TYPE_UINT, 12, INT_BYTES, "INT UNSIGNED", 0, UINT32_MAX, tsCompressInt, tsDecompressInt, getStatics_u32}, {TSDB_DATA_TYPE_UINT, 12, INT_BYTES, "INT UNSIGNED", 0, UINT32_MAX, tsCompressInt, tsDecompressInt, getStatics_u32},
{TSDB_DATA_TYPE_UBIGINT, 15, LONG_BYTES, "BIGINT UNSIGNED", 0, UINT64_MAX, tsCompressBigint, tsDecompressBigint, getStatics_u64}, {TSDB_DATA_TYPE_UBIGINT, 15, LONG_BYTES, "BIGINT UNSIGNED", 0, UINT64_MAX, tsCompressBigint, tsDecompressBigint, getStatics_u64},
{TSDB_DATA_TYPE_JSON,4, TSDB_MAX_JSON_TAGS_LEN, "JSON", 0, 0, tsCompressString, tsDecompressString, getStatics_nchr},
}; };
char tTokenTypeSwitcher[13] = { char tTokenTypeSwitcher[13] = {
...@@ -428,7 +430,7 @@ FORCE_INLINE void* getDataMax(int32_t type) { ...@@ -428,7 +430,7 @@ FORCE_INLINE void* getDataMax(int32_t type) {
bool isValidDataType(int32_t type) { bool isValidDataType(int32_t type) {
return type >= TSDB_DATA_TYPE_NULL && type <= TSDB_DATA_TYPE_UBIGINT; return type >= TSDB_DATA_TYPE_NULL && type <= TSDB_DATA_TYPE_JSON;
} }
void setVardataNull(void* val, int32_t type) { void setVardataNull(void* val, int32_t type) {
...@@ -438,6 +440,9 @@ void setVardataNull(void* val, int32_t type) { ...@@ -438,6 +440,9 @@ void setVardataNull(void* val, int32_t type) {
} else if (type == TSDB_DATA_TYPE_NCHAR) { } else if (type == TSDB_DATA_TYPE_NCHAR) {
varDataSetLen(val, sizeof(int32_t)); varDataSetLen(val, sizeof(int32_t));
*(uint32_t*) varDataVal(val) = TSDB_DATA_NCHAR_NULL; *(uint32_t*) varDataVal(val) = TSDB_DATA_NCHAR_NULL;
} else if (type == TSDB_DATA_TYPE_JSON) {
varDataSetLen(val, sizeof(int32_t));
*(uint32_t*) varDataVal(val) = TSDB_DATA_JSON_NULL;
} else { } else {
assert(0); assert(0);
} }
...@@ -505,6 +510,7 @@ void setNullN(void *val, int32_t type, int32_t bytes, int32_t numOfElems) { ...@@ -505,6 +510,7 @@ void setNullN(void *val, int32_t type, int32_t bytes, int32_t numOfElems) {
break; break;
case TSDB_DATA_TYPE_NCHAR: case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_JSON:
for (int32_t i = 0; i < numOfElems; ++i) { for (int32_t i = 0; i < numOfElems; ++i) {
setVardataNull(POINTER_SHIFT(val, i * bytes), type); setVardataNull(POINTER_SHIFT(val, i * bytes), type);
} }
......
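For readers skimming the diff above: the new JSON tag type is treated as a variable-length (var-data) column, so its NULL marker is written the same way as BINARY/NCHAR values — a length prefix followed by a type-specific NULL sentinel, which is why setVardataNull() and setNullN() gain a TSDB_DATA_TYPE_JSON branch. The sketch below is a minimal, self-contained illustration of that layout; the varDataSetLen/varDataVal stand-ins and the TSDB_DATA_JSON_NULL value are simplified assumptions for illustration, not the actual TDengine header definitions.

/*
 * Minimal sketch of a var-data NULL layout, mirroring the setVardataNull()
 * change in the diff above.  The macros and the sentinel value are
 * simplified stand-ins for the real TDengine definitions.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define VARSTR_HEADER_SIZE  sizeof(uint16_t)               /* 2-byte length prefix */
#define varDataLen(v)       (*(uint16_t *)(v))
#define varDataVal(v)       ((char *)(v) + VARSTR_HEADER_SIZE)
#define varDataSetLen(v, l) (varDataLen(v) = (uint16_t)(l))

#define TSDB_DATA_JSON_NULL 0xFFFFFFFEu                    /* assumed sentinel, illustration only */

/* Write a JSON NULL into a var-data buffer: length prefix + 4-byte sentinel. */
static void setJsonNull(void *val) {
  varDataSetLen(val, sizeof(uint32_t));
  *(uint32_t *)varDataVal(val) = TSDB_DATA_JSON_NULL;
}

int main(void) {
  char buf[VARSTR_HEADER_SIZE + sizeof(uint32_t)];
  memset(buf, 0, sizeof(buf));
  setJsonNull(buf);
  printf("len=%u sentinel=0x%08X\n", varDataLen(buf), *(uint32_t *)varDataVal(buf));
  return 0;
}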

Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio Version 16
VisualStudioVersion = 16.0.30114.105
MinimumVisualStudioVersion = 10.0.40219.1
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "src", "src", "{A1FB5B66-E32F-4789-9BE9-042E5BD21087}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TDengineDriver", "src\TDengineDriver\TDengineDriver.csproj", "{5BED7402-0A65-4ED9-A491-C56BFB518045}"
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "test", "test", "{CB8E6458-31E1-4351-B704-1B918E998654}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "XUnitTest", "src\test\XUnitTest\XUnitTest.csproj", "{64C0A478-2591-4459-9F8F-A70F37976A41}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Cases", "src\test\Cases\Cases.csproj", "{19A69D26-66BF-4227-97BE-9B087BC76B2F}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
Debug|x64 = Debug|x64
Debug|x86 = Debug|x86
Release|Any CPU = Release|Any CPU
Release|x64 = Release|x64
Release|x86 = Release|x86
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
{5BED7402-0A65-4ED9-A491-C56BFB518045}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{5BED7402-0A65-4ED9-A491-C56BFB518045}.Debug|Any CPU.Build.0 = Debug|Any CPU
{5BED7402-0A65-4ED9-A491-C56BFB518045}.Debug|x64.ActiveCfg = Debug|Any CPU
{5BED7402-0A65-4ED9-A491-C56BFB518045}.Debug|x64.Build.0 = Debug|Any CPU
{5BED7402-0A65-4ED9-A491-C56BFB518045}.Debug|x86.ActiveCfg = Debug|Any CPU
{5BED7402-0A65-4ED9-A491-C56BFB518045}.Debug|x86.Build.0 = Debug|Any CPU
{5BED7402-0A65-4ED9-A491-C56BFB518045}.Release|Any CPU.ActiveCfg = Release|Any CPU
{5BED7402-0A65-4ED9-A491-C56BFB518045}.Release|Any CPU.Build.0 = Release|Any CPU
{5BED7402-0A65-4ED9-A491-C56BFB518045}.Release|x64.ActiveCfg = Release|Any CPU
{5BED7402-0A65-4ED9-A491-C56BFB518045}.Release|x64.Build.0 = Release|Any CPU
{5BED7402-0A65-4ED9-A491-C56BFB518045}.Release|x86.ActiveCfg = Release|Any CPU
{5BED7402-0A65-4ED9-A491-C56BFB518045}.Release|x86.Build.0 = Release|Any CPU
{64C0A478-2591-4459-9F8F-A70F37976A41}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{64C0A478-2591-4459-9F8F-A70F37976A41}.Debug|Any CPU.Build.0 = Debug|Any CPU
{64C0A478-2591-4459-9F8F-A70F37976A41}.Debug|x64.ActiveCfg = Debug|Any CPU
{64C0A478-2591-4459-9F8F-A70F37976A41}.Debug|x64.Build.0 = Debug|Any CPU
{64C0A478-2591-4459-9F8F-A70F37976A41}.Debug|x86.ActiveCfg = Debug|Any CPU
{64C0A478-2591-4459-9F8F-A70F37976A41}.Debug|x86.Build.0 = Debug|Any CPU
{64C0A478-2591-4459-9F8F-A70F37976A41}.Release|Any CPU.ActiveCfg = Release|Any CPU
{64C0A478-2591-4459-9F8F-A70F37976A41}.Release|Any CPU.Build.0 = Release|Any CPU
{64C0A478-2591-4459-9F8F-A70F37976A41}.Release|x64.ActiveCfg = Release|Any CPU
{64C0A478-2591-4459-9F8F-A70F37976A41}.Release|x64.Build.0 = Release|Any CPU
{64C0A478-2591-4459-9F8F-A70F37976A41}.Release|x86.ActiveCfg = Release|Any CPU
{64C0A478-2591-4459-9F8F-A70F37976A41}.Release|x86.Build.0 = Release|Any CPU
{19A69D26-66BF-4227-97BE-9B087BC76B2F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{19A69D26-66BF-4227-97BE-9B087BC76B2F}.Debug|Any CPU.Build.0 = Debug|Any CPU
{19A69D26-66BF-4227-97BE-9B087BC76B2F}.Debug|x64.ActiveCfg = Debug|Any CPU
{19A69D26-66BF-4227-97BE-9B087BC76B2F}.Debug|x64.Build.0 = Debug|Any CPU
{19A69D26-66BF-4227-97BE-9B087BC76B2F}.Debug|x86.ActiveCfg = Debug|Any CPU
{19A69D26-66BF-4227-97BE-9B087BC76B2F}.Debug|x86.Build.0 = Debug|Any CPU
{19A69D26-66BF-4227-97BE-9B087BC76B2F}.Release|Any CPU.ActiveCfg = Release|Any CPU
{19A69D26-66BF-4227-97BE-9B087BC76B2F}.Release|Any CPU.Build.0 = Release|Any CPU
{19A69D26-66BF-4227-97BE-9B087BC76B2F}.Release|x64.ActiveCfg = Release|Any CPU
{19A69D26-66BF-4227-97BE-9B087BC76B2F}.Release|x64.Build.0 = Release|Any CPU
{19A69D26-66BF-4227-97BE-9B087BC76B2F}.Release|x86.ActiveCfg = Release|Any CPU
{19A69D26-66BF-4227-97BE-9B087BC76B2F}.Release|x86.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(NestedProjects) = preSolution
{5BED7402-0A65-4ED9-A491-C56BFB518045} = {A1FB5B66-E32F-4789-9BE9-042E5BD21087}
{CB8E6458-31E1-4351-B704-1B918E998654} = {A1FB5B66-E32F-4789-9BE9-042E5BD21087}
{64C0A478-2591-4459-9F8F-A70F37976A41} = {CB8E6458-31E1-4351-B704-1B918E998654}
{19A69D26-66BF-4227-97BE-9B087BC76B2F} = {CB8E6458-31E1-4351-B704-1B918E998654}
EndGlobalSection
EndGlobal
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFramework>net5.0</TargetFramework>
</PropertyGroup>
</Project>
<Project Sdk="Microsoft.NET.Sdk">
<ItemGroup>
<ProjectReference Include="..\..\TDengineDriver\TDengineDriver.csproj" />
</ItemGroup>
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>net5.0</TargetFramework>
</PropertyGroup>
</Project>