Commit c3f4d74f authored by: H Haojun Liao

Merge branch '3.0' into feature/3.0_liaohj

......@@ -18,6 +18,7 @@ mac/
.mypy_cache
*.tmp
*.swp
*.orig
src/connector/nodejs/node_modules/
src/connector/nodejs/out/
tests/test/
......
import hudson.model.Result
import hudson.model.*;
import jenkins.model.CauseOfInterruption
properties([pipelineTriggers([githubPush()])])
node {
git url: 'https://github.com/taosdata/TDengine.git'
}
def skipbuild=0
def win_stop=0
def abortPreviousBuilds() {
def currentJobName = env.JOB_NAME
......@@ -41,72 +41,72 @@ def pre_test(){
killall -9 taosd ||echo "no taosd running"
killall -9 gdb || echo "no gdb running"
killall -9 python3.8 || echo "no python program running"
cd ${WORKSPACE}
cd ${WKC}
git reset --hard HEAD~10 >/dev/null
'''
script {
if (env.CHANGE_TARGET == 'master') {
sh '''
cd ${WORKSPACE}
cd ${WKC}
git checkout master
'''
}
else if(env.CHANGE_TARGET == '2.0'){
sh '''
cd ${WORKSPACE}
cd ${WKC}
git checkout 2.0
'''
}
else if(env.CHANGE_TARGET == '3.0'){
sh '''
cd ${WORKSPACE}
cd ${WKC}
git checkout 3.0
'''
}
else{
sh '''
cd ${WORKSPACE}
cd ${WKC}
git checkout develop
'''
}
}
sh'''
cd ${WORKSPACE}
cd ${WKC}
git pull >/dev/null
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
git clean -dfx
'''
// script {
// if (env.CHANGE_TARGET == 'master') {
// sh '''
// cd ${WK}
// git checkout master
// '''
// }
// else if(env.CHANGE_TARGET == '2.0'){
// sh '''
// cd ${WK}
// git checkout 2.0
// '''
// }
// else if(env.CHANGE_TARGET == '3.0'){
// sh '''
// cd ${WK}
// git checkout 3.0
// '''
// }
// else{
// sh '''
// cd ${WK}
// git checkout develop
// '''
// }
// }
// sh '''
// cd ${WK}
// git pull >/dev/null
script {
if (env.CHANGE_TARGET == 'master') {
sh '''
cd ${WK}
git checkout master
'''
}
else if(env.CHANGE_TARGET == '2.0'){
sh '''
cd ${WK}
git checkout 2.0
'''
}
else if(env.CHANGE_TARGET == '3.0'){
sh '''
cd ${WK}
git checkout 3.0
'''
}
else{
sh '''
cd ${WK}
git checkout develop
'''
}
}
sh '''
cd ${WK}
git pull >/dev/null
git clean -dfx
export TZ=Asia/Harbin
date
mkdir debug
......@@ -121,6 +121,7 @@ def pre_test(){
pipeline {
agent none
options { skipDefaultCheckout() }
environment{
WK = '/var/lib/jenkins/workspace/TDinternal'
WKC= '/var/lib/jenkins/workspace/TDinternal/community'
......@@ -128,6 +129,7 @@ pipeline {
stages {
stage('pre_build'){
agent{label 'master'}
options { skipDefaultCheckout() }
when {
changeRequest()
}
......@@ -136,269 +138,228 @@ pipeline {
abort_previous()
abortPreviousBuilds()
}
sh'''
rm -rf ${WORKSPACE}.tes
cp -r ${WORKSPACE} ${WORKSPACE}.tes
cd ${WORKSPACE}.tes
git fetch
'''
script {
if (env.CHANGE_TARGET == 'master') {
sh '''
git checkout master
'''
}
else if(env.CHANGE_TARGET == '2.0'){
sh '''
git checkout 2.0
'''
}
else if(env.CHANGE_TARGET == '3.0'){
sh '''
git checkout 3.0
'''
}
else{
sh '''
git checkout develop
'''
}
}
sh'''
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
'''
script{
skipbuild='2'
skipbuild=sh(script: "git log -2 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip]' && echo 1 || echo 2", returnStdout:true)
println skipbuild
}
sh'''
rm -rf ${WORKSPACE}.tes
'''
pre_test()
}
}
stage('Parallel test stage') {
//only build pr
when {
allOf{
changeRequest()
expression{
return skipbuild.trim() == '2'
}
}
}
parallel {
stage('python_1_s1') {
agent{label " slave1 || slave11 "}
steps {
// stage('Parallel test stage') {
// //only build pr
// options { skipDefaultCheckout() }
// when {
// allOf{
// changeRequest()
// }
// }
// parallel {
// stage('python_1_s1') {
// agent{label " slave1 || slave11 "}
// steps {
pre_test()
// timeout(time: 55, unit: 'MINUTES'){
// sh '''
// date
// cd ${WKC}/tests
// ./test-all.sh p1
// date'''
// }
// pre_test()
// // timeout(time: 55, unit: 'MINUTES'){
// // sh '''
// // date
// // cd ${WKC}/tests
// // ./test-all.sh p1
// // date'''
// // }
}
}
stage('python_2_s5') {
agent{label " slave5 || slave15 "}
steps {
// }
// }
// stage('python_2_s5') {
// agent{label " slave5 || slave15 "}
// steps {
pre_test()
// timeout(time: 55, unit: 'MINUTES'){
// sh '''
// date
// cd ${WKC}/tests
// ./test-all.sh p2
// date'''
// }
}
}
stage('python_3_s6') {
agent{label " slave6 || slave16 "}
steps {
pre_test()
// timeout(time: 55, unit: 'MINUTES'){
// pre_test()
// // timeout(time: 55, unit: 'MINUTES'){
// // sh '''
// // date
// // cd ${WKC}/tests
// // ./test-all.sh p2
// // date'''
// // }
// }
// }
// stage('python_3_s6') {
// agent{label " slave6 || slave16 "}
// steps {
// pre_test()
// // timeout(time: 55, unit: 'MINUTES'){
// sh '''
// date
// cd ${WKC}/tests
// ./test-all.sh p3
// date'''
// }
}
}
stage('test_b1_s2') {
agent{label " slave2 || slave12 "}
steps {
pre_test()
// timeout(time: 55, unit: 'MINUTES'){
// // sh '''
// // date
// // cd ${WKC}/tests
// // ./test-all.sh p3
// // date'''
// // }
// }
// }
// stage('test_b1_s2') {
// agent{label " slave2 || slave12 "}
// steps {
// pre_test()
// // timeout(time: 55, unit: 'MINUTES'){
// sh '''
// rm -rf /var/lib/taos/*
// rm -rf /var/log/taos/*
// nohup taosd >/dev/null &
// sleep 10
// '''
// sh '''
// cd ${WKC}/tests/examples/nodejs
// npm install td2.0-connector > /dev/null 2>&1
// node nodejsChecker.js host=localhost
// node test1970.js
// cd ${WKC}/tests/connectorTest/nodejsTest/nanosupport
// npm install td2.0-connector > /dev/null 2>&1
// node nanosecondTest.js
// // sh '''
// // rm -rf /var/lib/taos/*
// // rm -rf /var/log/taos/*
// // nohup taosd >/dev/null &
// // sleep 10
// // '''
// // sh '''
// // cd ${WKC}/tests/examples/nodejs
// // npm install td2.0-connector > /dev/null 2>&1
// // node nodejsChecker.js host=localhost
// // node test1970.js
// // cd ${WKC}/tests/connectorTest/nodejsTest/nanosupport
// // npm install td2.0-connector > /dev/null 2>&1
// // node nanosecondTest.js
// '''
// sh '''
// cd ${WKC}/tests/examples/C#/taosdemo
// mcs -out:taosdemo *.cs > /dev/null 2>&1
// echo '' |./taosdemo -c /etc/taos
// cd ${WKC}/tests/connectorTest/C#Test/nanosupport
// mcs -out:nano *.cs > /dev/null 2>&1
// echo '' |./nano
// '''
// sh '''
// cd ${WKC}/tests/gotest
// bash batchtest.sh
// '''
// sh '''
// cd ${WKC}/tests
// ./test-all.sh b1fq
// date'''
// }
}
}
stage('test_crash_gen_s3') {
agent{label " slave3 || slave13 "}
// // '''
// // sh '''
// // cd ${WKC}/tests/examples/C#/taosdemo
// // mcs -out:taosdemo *.cs > /dev/null 2>&1
// // echo '' |./taosdemo -c /etc/taos
// // cd ${WKC}/tests/connectorTest/C#Test/nanosupport
// // mcs -out:nano *.cs > /dev/null 2>&1
// // echo '' |./nano
// // '''
// // sh '''
// // cd ${WKC}/tests/gotest
// // bash batchtest.sh
// // '''
// // sh '''
// // cd ${WKC}/tests
// // ./test-all.sh b1fq
// // date'''
// // }
// }
// }
// stage('test_crash_gen_s3') {
// agent{label " slave3 || slave13 "}
steps {
pre_test()
// timeout(time: 60, unit: 'MINUTES'){
// sh '''
// cd ${WKC}/tests/pytest
// ./crash_gen.sh -a -p -t 4 -s 2000
// '''
// }
// timeout(time: 60, unit: 'MINUTES'){
// // sh '''
// // cd ${WKC}/tests/pytest
// // rm -rf /var/lib/taos/*
// // rm -rf /var/log/taos/*
// // ./handle_crash_gen_val_log.sh
// // '''
// sh '''
// cd ${WKC}/tests/pytest
// rm -rf /var/lib/taos/*
// rm -rf /var/log/taos/*
// ./handle_taosd_val_log.sh
// '''
// }
// timeout(time: 55, unit: 'MINUTES'){
// sh '''
// date
// cd ${WKC}/tests
// ./test-all.sh b2fq
// date
// '''
// }
}
}
stage('test_valgrind_s4') {
agent{label " slave4 || slave14 "}
// steps {
// pre_test()
// // timeout(time: 60, unit: 'MINUTES'){
// // sh '''
// // cd ${WKC}/tests/pytest
// // ./crash_gen.sh -a -p -t 4 -s 2000
// // '''
// // }
// // timeout(time: 60, unit: 'MINUTES'){
// // // sh '''
// // // cd ${WKC}/tests/pytest
// // // rm -rf /var/lib/taos/*
// // // rm -rf /var/log/taos/*
// // // ./handle_crash_gen_val_log.sh
// // // '''
// // sh '''
// // cd ${WKC}/tests/pytest
// // rm -rf /var/lib/taos/*
// // rm -rf /var/log/taos/*
// // ./handle_taosd_val_log.sh
// // '''
// // }
// // timeout(time: 55, unit: 'MINUTES'){
// // sh '''
// // date
// // cd ${WKC}/tests
// // ./test-all.sh b2fq
// // date
// // '''
// // }
// }
// }
// stage('test_valgrind_s4') {
// agent{label " slave4 || slave14 "}
steps {
pre_test()
// catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
// sh '''
// cd ${WKC}/tests/pytest
// ./valgrind-test.sh 2>&1 > mem-error-out.log
// ./handle_val_log.sh
// '''
// }
// timeout(time: 55, unit: 'MINUTES'){
// sh '''
// date
// cd ${WKC}/tests
// ./test-all.sh b3fq
// date'''
// sh '''
// date
// cd ${WKC}/tests
// ./test-all.sh full example
// date'''
// }
}
}
stage('test_b4_s7') {
agent{label " slave7 || slave17 "}
steps {
pre_test()
// timeout(time: 55, unit: 'MINUTES'){
// steps {
// pre_test()
// // catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
// // sh '''
// // cd ${WKC}/tests/pytest
// // ./valgrind-test.sh 2>&1 > mem-error-out.log
// // ./handle_val_log.sh
// // '''
// // }
// // timeout(time: 55, unit: 'MINUTES'){
// // sh '''
// // date
// // cd ${WKC}/tests
// // ./test-all.sh b3fq
// // date'''
// // sh '''
// // date
// // cd ${WKC}/tests
// // ./test-all.sh full example
// // date'''
// // }
// }
// }
// stage('test_b4_s7') {
// agent{label " slave7 || slave17 "}
// steps {
// pre_test()
// // timeout(time: 55, unit: 'MINUTES'){
// sh '''
// date
// cd ${WKC}/tests
// ./test-all.sh b4fq
// cd ${WKC}/tests
// ./test-all.sh p4
// cd ${WKC}/tests
// ./test-all.sh full jdbc
// cd ${WKC}/tests
// ./test-all.sh full unit
// date'''
// }
}
}
stage('test_b5_s8') {
agent{label " slave8 || slave18 "}
steps {
pre_test()
// timeout(time: 55, unit: 'MINUTES'){
// // sh '''
// // date
// // cd ${WKC}/tests
// // ./test-all.sh b4fq
// // cd ${WKC}/tests
// // ./test-all.sh p4
// // cd ${WKC}/tests
// // ./test-all.sh full jdbc
// // cd ${WKC}/tests
// // ./test-all.sh full unit
// // date'''
// // }
// }
// }
// stage('test_b5_s8') {
// agent{label " slave8 || slave18 "}
// steps {
// pre_test()
// // timeout(time: 55, unit: 'MINUTES'){
// sh '''
// date
// cd ${WKC}/tests
// ./test-all.sh b5fq
// date'''
// }
}
}
stage('test_b6_s9') {
agent{label " slave9 || slave19 "}
steps {
pre_test()
// timeout(time: 55, unit: 'MINUTES'){
// // sh '''
// // date
// // cd ${WKC}/tests
// // ./test-all.sh b5fq
// // date'''
// // }
// }
// }
// stage('test_b6_s9') {
// agent{label " slave9 || slave19 "}
// steps {
// pre_test()
// // timeout(time: 55, unit: 'MINUTES'){
// sh '''
// date
// cd ${WKC}/tests
// ./test-all.sh b6fq
// date'''
// }
}
}
stage('test_b7_s10') {
agent{label " slave10 || slave20 "}
steps {
pre_test()
// timeout(time: 55, unit: 'MINUTES'){
// // sh '''
// // date
// // cd ${WKC}/tests
// // ./test-all.sh b6fq
// // date'''
// // }
// }
// }
// stage('test_b7_s10') {
// agent{label " slave10 || slave20 "}
// steps {
// pre_test()
// // timeout(time: 55, unit: 'MINUTES'){
// sh '''
// date
// cd ${WKC}/tests
// ./test-all.sh b7fq
// date'''
// }
}
}
}
}
// // sh '''
// // date
// // cd ${WKC}/tests
// // ./test-all.sh b7fq
// // date'''
// // }
// }
// }
// }
// }
}
post {
success {
......
......@@ -2,7 +2,7 @@
# lucene
ExternalProject_Add(lucene
GIT_REPOSITORY https://github.com/taosdata-contrib/LucenePlusPlus.git
GIT_TAG rel_3.0.8
GIT_TAG rel_3.0.8_td
SOURCE_DIR "${CMAKE_SOURCE_DIR}/deps/lucene"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
......
......@@ -2,3 +2,7 @@
if(${BUILD_WITH_ROCKSDB})
add_subdirectory(rocksdb)
endif(${BUILD_WITH_ROCKSDB})
if(${BUILD_WITH_LUCENE})
add_subdirectory(lucene)
endif(${BUILD_WITH_LUCENE})
add_executable(luceneTest "")
target_sources(luceneTest
PRIVATE
"${CMAKE_CURRENT_SOURCE_DIR}/main.cpp"
)
target_link_libraries(luceneTest lucene++)
\ No newline at end of file
#include <iostream>
int main(int argc, char const *argv[]) {
std::cout << "Hello, this is lucene test" << std::endl;
return 0;
}
```plantuml
@startuml create_table
skinparam sequenceMessageAlign center
skinparam responseMessageBelowArrow true
participant APP as app
box "dnode1"
participant RPC as rpc
participant VNODE as vnode
participant SYNC as sync
end box
box "dnode2"
participant SYNC as sync2
participant VNODE as vnode2
end box
box "dnode3"
participant SYNC as sync3
participant VNODE as vnode3
end box
' APP send request to dnode and RPC in dnode recv the request
app ->rpc: create table req
' RPC call vnodeProcessReq() function to process the request
rpc -> vnode: vnodeProcessReq
note right
callback function
run in RPC module
threads. The function
only puts the request
to a vnode queue.
end note
' VNODE call vnodeProcessReqs() function to integrate requests and process as a whole
vnode -> vnode: vnodeProcessReqs()
note right
integrate reqs and
process as a whole
end note
' sync the request to other nodes
vnode -> sync: syncProcessReqs()
' make request persistent
' sync -->vnode: walWrite()\n(callback function)
' replicate requests to other DNODES
sync -> sync2: replication req
sync -> sync3: replication req
sync2 -> vnode2: walWrite()\n(callback function)
sync2 --> sync: replication rsp\n(confirm)
sync3 -> vnode3: walWrite()\n(callback function)
sync3 --> sync: replication rsp\n(confirm)
' send apply request
sync -> sync2: apply req
sync -> sync3: apply req
' vnode apply
sync2 -> vnode2: vnodeApplyReqs()
sync3 -> vnode3: vnodeApplyReqs()
' call apply request
sync --> vnode: vnodeApplyReqs()\n(callback function)
' send response
vnode --> rpc: rpcSendRsp()
' dnode send response to APP
rpc --> app: create table rsp
@enduml
```
## Leader processing of strongly consistent write requests
```plantuml
@startuml leader_process_stict_consistency
box "dnode1"
participant CRPC as crpc
participant VNODE as vnode
participant SYNC as sync
end box
-> crpc: create table/submit req
' In CRPC threads
group #pink "In CRPC threads"
crpc -> vnode:vnodeProcessReq()
note right
A callback function
run by CRPC thread
to put the request
to a vnode queue
end note
end
' In VNODE worker threads
group #lightblue "In VNODE worker threads"
vnode -> vnode: vnodeProcessReqs()
note right
VNODE processes requests
accumulated in a
vnode write queue and
handles the batch
as a whole
end note
vnode -> sync: syncProcessReqs()
sync -> : replication req1
sync -> : replication req2
end
group #red "SYNC threads"
sync <- : replication rsp1
sync <- : replication rsp2
sync -> vnode: notify apply
sync -> : apply rsp1
sync -> : apply rsp2
end
group #lightblue "In VNODE worker threads"
vnode -> vnode: vnodeApplyReqs()
vnode -> crpc:
end
<- crpc: create table/submit rsp
@enduml
```
## Follower processing of strongly consistent write requests
```plantuml
@startuml follower_process_strict_consistency
participant SYNC as sync
participant VNODE as vnode
group #pink "SYNC threads"
-> sync: replication req
sync -> sync: syncProcessReqs()
note right
In replication,
only the data is
persisted and a
response is sent back
end note
<- sync: replication rsp
-> sync: apply req
sync -> vnode: notify apply
end
group #lightblue "VNODE worker threads"
vnode -> vnode: vnodeApplyReqs()
end
@enduml
```
## Leader processing of eventually consistent write requests
```plantuml
@startuml leader_process_eventual_consistency
box "dnode1"
participant CRPC as crpc
participant VNODE as vnode
participant SYNC as sync
end box
-> crpc: create table/submit req
' In CRPC threads
group #pink "In CRPC threads"
crpc -> vnode:vnodeProcessReq()
note right
A callback function
run by CRPC thread
to put the request
to a vnode queue
end note
end
' In VNODE worker threads
group #lightblue "In VNODE worker threads"
vnode -> vnode: vnodeProcessReqs()
note right
VNODE processes requests
accumulated in a
vnode write queue and
handles the batch
as a whole
end note
vnode -> sync: syncProcessReqs()
sync -> : replication req1
sync -> : replication req2
sync -> vnode: notify apply
end
group #lightblue "In VNODE worker threads"
vnode -> vnode: vnodeApplyReqs()
vnode -> crpc:
end
<- crpc: create table/submit rsp
@enduml
```
## Follower processing of eventually consistent write requests
```plantuml
@startuml follower_process_eventual_consistency
participant SYNC as sync
participant VNODE as vnode
group #pink "SYNC threads"
-> sync: replication rsp
sync -> sync: syncProcessReqs()
sync -> vnode: notify VNODE \nthread to process\n the reqs
end
group #lightblue "VNODE worker threads"
vnode -> vnode: vnodeApplyReqs()
end
@enduml
```
\ No newline at end of file
<center><h1>VNODE Write Processes</h1></center>
## META Operations
META data write operations include:
1. create table
2. drop table
3. alter table
We take create table as an example to walk through the whole process.
```plantuml
@startuml create_table
skinparam sequenceMessageAlign center
skinparam responseMessageBelowArrow true
participant APP as app
box "dnode1"
participant RPC as rpc
participant VNODE as vnode
participant SYNC as sync
end box
box "dnode2"
participant SYNC as sync2
participant VNODE as vnode2
end box
box "dnode3"
participant SYNC as sync3
participant VNODE as vnode3
end box
' APP send request to dnode and RPC in dnode recv the request
app ->rpc: create table req
' RPC call vnodeProcessReq() function to process the request
rpc -> vnode: vnodeProcessReq
note right
callback function
run in RPC module
threads. The function
only puts the request
to a vnode queue.
end note
' VNODE call vnodeProcessReqs() function to integrate requests and process as a whole
vnode -> vnode: vnodeProcessReqs()
note right
integrate reqs and
process as a whole
end note
' sync the request to other nodes
vnode -> sync: syncProcessReqs()
' make request persistent
' sync -->vnode: walWrite()\n(callback function)
' replicate requests to other DNODES
sync -> sync2: replication req
sync -> sync3: replication req
sync2 -> vnode2: walWrite()\n(callback function)
sync2 --> sync: replication rsp\n(confirm)
sync3 -> vnode3: walWrite()\n(callback function)
sync3 --> sync: replication rsp\n(confirm)
' send apply request
sync -> sync2: apply req
sync -> sync3: apply req
' vnode apply
sync2 -> vnode2: vnodeApplyReqs()
sync3 -> vnode3: vnodeApplyReqs()
' call apply request
sync --> vnode: vnodeApplyReqs()\n(callback function)
' send response
vnode --> rpc: rpcSendRsp()
' dnode send response to APP
rpc --> app: create table rsp
@enduml
```
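The diagram above can be condensed into a minimal C sketch. This is an illustrative outline only: the struct layouts and the queue helper are assumptions, and only the names `vnodeProcessReq()`, `vnodeProcessReqs()` and `syncProcessReqs()` come from the diagram.
```c
#include <stdio.h>

/* Simplified stand-in types; the real TDengine structures differ. */
typedef struct { int msgType; void *pCont; int contLen; } SRpcMsg;
typedef struct { SRpcMsg reqs[64]; int nReqs; } SVnodeQueue;
typedef struct { SVnodeQueue queue; void *pSync; } SVnode;

/* Assumed queue helper: append one request to the vnode write queue. */
static int vnodePutReqToQueue(SVnode *pVnode, const SRpcMsg *pMsg) {
  if (pVnode->queue.nReqs >= 64) return -1;
  pVnode->queue.reqs[pVnode->queue.nReqs++] = *pMsg;
  return 0;
}

/* Stand-in for the sync layer: would write the WAL, replicate to peers,
 * and later trigger the apply callback on commit. */
static int syncProcessReqs(void *pSync, SRpcMsg *reqs, int nReqs) {
  (void)pSync; (void)reqs;
  printf("replicating a batch of %d reqs\n", nReqs);
  return 0;
}

/* RPC-thread callback: only enqueue, never touch disk or the network here. */
int vnodeProcessReq(SVnode *pVnode, SRpcMsg *pMsg) {
  return vnodePutReqToQueue(pVnode, pMsg);
}

/* Vnode worker thread: drain the queue and process the batch as a whole. */
int vnodeProcessReqs(SVnode *pVnode) {
  int code = syncProcessReqs(pVnode->pSync, pVnode->queue.reqs, pVnode->queue.nReqs);
  pVnode->queue.nReqs = 0;
  return code;
}
```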
## Time-series Data Operations
There is only one operation for time-series data: data insertion. We will walk through the whole process.
```plantuml
@startuml create_table
skinparam sequenceMessageAlign center
skinparam responseMessageBelowArrow true
participant APP as app
box "dnode1"
participant RPC as rpc
participant VNODE as vnode
participant SYNC as sync
end box
box "dnode2"
participant SYNC as sync2
participant VNODE as vnode2
end box
box "dnode3"
participant SYNC as sync3
participant VNODE as vnode3
end box
' APP send request to dnode and RPC in dnode recv the request
app ->rpc: insert data req
' RPC call vnodeProcessReq() function to process the request
rpc -> vnode: vnodeProcessReq
note right
callback function
run in RPC module
threads. The function
only puts the request
to a vnode queue.
end note
' VNODE call vnodeProcessReqs() function to integrate requests and process as a whole
vnode -> vnode: vnodeProcessReqs()
note right
integrate reqs and
process as a whole
end note
' sync the request to other nodes
vnode -> sync: syncProcessReqs()
' ' make request persistent
' ' sync -->vnode: walWrite()\n(callback function)
' ' replicate requests to other DNODES
sync -> sync2: replication req
sync -> sync3: replication req
' vnode apply
sync2 -> vnode2: vnodeApplyReqs()
sync3 -> vnode3: vnodeApplyReqs()
' call apply request
sync --> vnode: vnodeApplyReqs()\n(callback function)
' send response
vnode --> rpc: rpcSendRsp()
' dnode send response to APP
rpc --> app: insert data rsp
@enduml
```
## vnodeProcessReqs()
```plantuml
@startuml vnodeProcessReqs()
participant VNODE as v
participant SYNC as s
group vnodeProcessReqs()
' Group requests and get a request batch to process as a whole
v -> v: vnodeGetReqsFromQueue()
note right
integrate all write
requests as a batch
to process as a whole
end note
' VNODE call syncProcessReqs() function to process the batch request
v -> s: syncProcessReqs()
group syncProcessReqs()
' Check if current node is leader
alt not leader
return NOT_LEADER
end
s -> s: syncAppendReqsToLogStore()
group syncAppendReqsToLogStore()
s -> v: walWrite()
note right
There must be a
callback function
provided by VNODE
to persist the
requests in WAL
end note
alt (no unapplied reqs) AND (only one node OR no meta requests)
s -> v: vnodeApplyReqs()
note right
just use the worker
thread to apply
the requests. This
is a callback function
provided by VNODE
end note
else other cases that must wait for responses
s -> s:
note right
save the requests in the log store
and wait for confirmation or
other conditions
end note
s ->]: send replication requests
s ->]: send replication requests
end
end
end
end
@enduml
```
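As a hedged reading of the `alt` branch above, the leader-side decision could look roughly like the following C sketch; `SSyncNode`, `SReqBatch` and the stub functions are illustrative names, not the actual implementation.
```c
typedef struct { int isLeader; int nNodes; int nUnapplied; } SSyncNode; /* assumed */
typedef struct { int nReqs; int nMetaReqs; } SReqBatch;                 /* assumed */

enum { SYNC_OK = 0, SYNC_NOT_LEADER = -1 };

static int walWriteBatch_stub(SReqBatch *b)       { (void)b; return 0; } /* WAL via vnode callback */
static int vnodeApplyReqs_stub(SReqBatch *b)      { (void)b; return 0; } /* apply in this worker thread */
static int sendReplicationReqs_stub(SReqBatch *b) { (void)b; return 0; } /* send to follower dnodes */

int syncProcessReqs(SSyncNode *pSync, SReqBatch *pBatch) {
  if (!pSync->isLeader) return SYNC_NOT_LEADER;

  /* syncAppendReqsToLogStore(): persist the batch through the vnode's walWrite() callback. */
  if (walWriteBatch_stub(pBatch) != 0) return -1;

  /* Fast path from the diagram: no unapplied requests AND
   * (single-node group OR a batch without meta requests). */
  if (pSync->nUnapplied == 0 && (pSync->nNodes == 1 || pBatch->nMetaReqs == 0)) {
    return vnodeApplyReqs_stub(pBatch);
  }

  /* Otherwise keep the batch in the log store and wait for confirmations. */
  pSync->nUnapplied += pBatch->nReqs;
  return sendReplicationReqs_stub(pBatch);
}
```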
<!-- ## syncProcessReplicationReq()
```plantuml
@startuml syncProcessReplicationReq
participant SYNC as s
participant VNODE as v
-> s: replication request
s -> s:
note right
process the request
to get the request
batch
end note
s -> s: syncAppendReqToLogStore()
s -> v: walWrite()
alt has meta req
<- s: confirmation
else
s -> v: vnodeApplyReqs()
end
@enduml -->
<!-- ``` -->
## vnodeApplyReqs()
The function *vnodeApplyReqs()* is the actual function run by a vnode to process the requests.
```plantuml
@startuml vnodeApplyReqs()
skinparam sequenceMessageAlign left
skinparam responseMessageBelowArrow true
participant VNODE as vnode
participant TQ as tq
participant TSDB as tsdb
participant META as meta
group vnodeApplyReqs()
autonumber
loop nReqs
' Copy request message to vnode buffer pool
vnode -> vnode: vnodeCopyReq()
note right
copy request to
vnode buffer pool
end note
vnode -> tq: tqPush()
note right
push the request
to TQ so consumers
can consume
end note
alt META_REQ
autonumber 3
vnode -> meta: metaApplyReq()
else TS_REQ
autonumber 3
vnode -> tsdb: tsdbApplyReq()
end
end
' Check if need to commit
alt vnode buffer pool is full
group vnodeCommit()
autonumber 4.1
vnode -> tq: tqCommit()
note right
tqCommit may renew wal
end note
vnode -> meta: metaCommit();
note right
commit meta data
end note
vnode -> tsdb: tsdbCommit();
note right
commit time-series data
end note
end
end
end
@enduml
```
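The apply loop in the diagram can be sketched in C as follows; the request struct and the buffer-pool accounting are simplified assumptions, while the call order (copy, `tqPush()`, `metaApplyReq()`/`tsdbApplyReq()`, commit when the pool is full) follows the diagram.
```c
typedef enum { META_REQ, TS_REQ } EReqType;                       /* assumed */
typedef struct { EReqType type; void *body; int len; } SVnodeReq; /* assumed */
typedef struct { long used; long size; } SBufPool;                /* assumed */

static void tqPush_stub(void *tq, SVnodeReq *req)         { (void)tq; (void)req; }
static void metaApplyReq_stub(void *meta, SVnodeReq *req) { (void)meta; (void)req; }
static void tsdbApplyReq_stub(void *tsdb, SVnodeReq *req) { (void)tsdb; (void)req; }
static void vnodeCommit_stub(void *vnode)                 { (void)vnode; } /* tqCommit + metaCommit + tsdbCommit */

void vnodeApplyReqs(void *vnode, void *tq, void *meta, void *tsdb,
                    SBufPool *pool, SVnodeReq *reqs, int nReqs) {
  for (int i = 0; i < nReqs; i++) {
    pool->used += reqs[i].len;          /* vnodeCopyReq(): copy into the vnode buffer pool */
    tqPush_stub(tq, &reqs[i]);          /* expose the request to TQ consumers */
    if (reqs[i].type == META_REQ) {
      metaApplyReq_stub(meta, &reqs[i]);
    } else {
      tsdbApplyReq_stub(tsdb, &reqs[i]);
    }
  }
  if (pool->used >= pool->size) {       /* buffer pool full: flush tq, meta and tsdb */
    vnodeCommit_stub(vnode);
    pool->used = 0;
  }
}
```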
<!-- META operations: create table, drop table, alter table (queue/sync)
data writes
integration of snapshot files with sync
vnodeOpen()
vnodeClose()
sync.h -->
......@@ -363,6 +363,8 @@ typedef struct {
} SConnectRsp;
typedef struct {
char user[TSDB_USER_LEN];
char pass[TSDB_KEY_LEN];
int32_t maxUsers;
int32_t maxDbs;
int32_t maxTimeSeries;
......@@ -374,12 +376,6 @@ typedef struct {
int64_t maxInbound;
int64_t maxOutbound;
int8_t accessState; // Configured only by command
} SAcctCfg;
typedef struct {
char user[TSDB_USER_LEN];
char pass[TSDB_KEY_LEN];
SAcctCfg cfg;
} SCreateAcctMsg, SAlterAcctMsg;
typedef struct {
......@@ -660,15 +656,18 @@ typedef struct SVgroupAccess {
} SVgroupAccess;
typedef struct {
int32_t dnodeId;
uint32_t moduleStatus;
uint32_t numOfVnodes;
char clusterId[TSDB_CLUSTER_ID_LEN];
char reserved[16];
int32_t dnodeId;
int8_t dropped;
char reserved[19];
int64_t clusterId;
int32_t numOfDnodes;
int32_t numOfVnodes;
} SDnodeCfg;
typedef struct {
int32_t dnodeId;
int8_t isMnode;
int8_t reserved;
uint16_t dnodePort;
char dnodeFqdn[TSDB_FQDN_LEN];
} SDnodeEp;
......@@ -679,55 +678,29 @@ typedef struct {
} SDnodeEps;
typedef struct {
int32_t mnodeId;
char mnodeEp[TSDB_EP_LEN];
} SMInfo;
typedef struct SMInfos {
int8_t inUse;
int8_t mnodeNum;
SMInfo mnodeInfos[TSDB_MAX_REPLICA];
} SMInfos;
typedef struct {
int32_t numOfMnodes; // tsNumOfMnodes
int32_t mnodeEqualVnodeNum; // tsMnodeEqualVnodeNum
int32_t offlineThreshold; // tsOfflineThreshold
int32_t statusInterval; // tsStatusInterval
int32_t maxtablesPerVnode;
int32_t maxVgroupsPerDb;
char arbitrator[TSDB_EP_LEN]; // tsArbitrator
char reserve[2]; // to solve arm32 bus error
char timezone[64]; // tsTimezone
int64_t checkTime; // 1970-01-01 00:00:00.000
char locale[TSDB_LOCALE_LEN]; // tsLocale
char charset[TSDB_LOCALE_LEN]; // tsCharset
int8_t enableBalance; // tsEnableBalance
int8_t flowCtrl;
int8_t slaveQuery;
int8_t adjustMaster;
int8_t reserved[4];
int32_t statusInterval; // tsStatusInterval
int8_t reserved[36];
int64_t checkTime; // 1970-01-01 00:00:00.000
char timezone[64]; // tsTimezone
char locale[TSDB_LOCALE_LEN]; // tsLocale
char charset[TSDB_LOCALE_LEN]; // tsCharset
} SClusterCfg;
typedef struct SStatusMsg {
uint32_t version;
int32_t dnodeId;
uint32_t lastReboot; // time stamp for last reboot
int32_t openVnodes;
int32_t numOfCores;
float diskAvailable;
int8_t reserved[36];
char dnodeEp[TSDB_EP_LEN];
uint32_t moduleStatus;
uint32_t lastReboot; // time stamp for last reboot
uint16_t reserve1; // from config file
uint16_t openVnodes;
uint16_t numOfCores;
float diskAvailable; // GB
char clusterId[TSDB_CLUSTER_ID_LEN];
uint8_t alternativeRole;
uint8_t reserve2[15];
int64_t clusterId;
SClusterCfg clusterCfg;
SVnodeLoad load[];
} SStatusMsg;
typedef struct {
SMInfos mnodes;
SDnodeCfg dnodeCfg;
SVgroupAccess vgAccess[];
} SStatusRsp;
......@@ -863,9 +836,9 @@ typedef struct {
} SCreateDnodeMsg, SDropDnodeMsg;
typedef struct {
int32_t dnodeId;
char dnodeEp[TSDB_EP_LEN]; // end point, hostname:port
SMInfos mnodes;
int32_t dnodeId;
int32_t mnodeNum;
SDnodeEp mnodeEps[];
} SCreateMnodeMsg;
typedef struct {
......
......@@ -151,8 +151,6 @@ extern int8_t tsPrintAuth;
extern int8_t tscEmbedded;
extern char tsVnodeDir[];
extern char tsMnodeDir[];
extern char tsMnodeBakDir[];
extern char tsMnodeTmpDir[];
extern int64_t tsTickPerDay[3];
extern int32_t tsTopicBianryLen;
......
/*
* Copyright (c) 2019 TAOS Data, Inc. <cli@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TD_RAFT_H
#define TD_RAFT_H
#ifdef __cplusplus
extern "C" {
#endif
#include <stdio.h>
#include "taosdef.h"
typedef unsigned int RaftId;
typedef unsigned int RaftGroupId;
// buffer holding data
typedef struct RaftBuffer {
void* data;
size_t len;
} RaftBuffer;
// a single server information in a cluster
typedef struct RaftServer {
RaftId id;
char fqdn[TSDB_FQDN_LEN];
uint16_t port;
} RaftServer;
// all servers in a cluster
typedef struct RaftConfiguration {
RaftServer *servers;
int nServer;
} RaftConfiguration;
// raft lib module
struct Raft;
typedef struct Raft Raft;
struct RaftNode;
typedef struct RaftNode RaftNode;
// raft state machine
struct RaftFSM;
typedef struct RaftFSM {
// statemachine user data
void *data;
// apply buffer data; bufs will be freed by the raft module
int (*apply)(struct RaftFSM *fsm, const RaftBuffer *bufs[], int nBufs);
// configuration commit callback
int (*onConfigurationCommit)(const RaftConfiguration* cluster);
// fsm returns snapshot in ppBuf; bufs will be freed by the raft module
// TODO: getSnapshot SHOULD be async?
int (*getSnapshot)(struct RaftFSM *fsm, RaftBuffer **ppBuf);
// fsm restore with pBuf data
int (*restore)(struct RaftFSM *fsm, RaftBuffer *pBuf);
// fsm sends data in buf to server; buf will be freed by the raft module
int (*send)(struct RaftFSM* fsm, const RaftServer* server, const RaftBuffer *buf);
} RaftFSM;
typedef struct RaftNodeOptions {
// user define state machine
RaftFSM* pFSM;
// election timeout(in ms)
// by default: 1000
int electionTimeoutMS;
// heart timeout(in ms)
// by default: 100
int heartbeatTimeoutMS;
// install snapshot timeout(in ms)
int installSnapshotTimeoutMS;
/**
* number of log entries before starting a new snapshot.
* by default: 1024
*/
int snapshotThreshold;
/**
* Number of log entries to keep in the log after a snapshot has
* been taken.
* by default: 128.
*/
int snapshotTrailing;
/**
* Enable or disable pre-vote support.
* by default: false
*/
bool preVote;
} RaftNodeOptions;
// create raft lib
int RaftCreate(Raft** ppRaft);
int RaftDestroy(Raft* pRaft);
// start a raft node with options,node id,group id
int RaftStart(Raft* pRaft,
RaftId selfId,
RaftGroupId selfGroupId,
const RaftConfiguration* cluster,
const RaftNodeOptions* options,
RaftNode **ppNode);
// stop a raft node
int RaftStop(RaftNode* pNode);
// client apply a cmd in buf
typedef void (*RaftApplyFp)(const RaftBuffer *pBuf, int result);
int RaftApply(RaftNode *pNode,
const RaftBuffer *pBuf,
RaftApplyFp applyCb);
// recv data from other servers in cluster,buf will be free in raft
int RaftRecv(RaftNode *pNode, const RaftBuffer* pBuf);
// change cluster servers API
typedef void (*RaftChangeFp)(const RaftServer* pServer, int result);
int RaftAddServer(RaftNode *pNode,
const RaftServer* pServer,
RaftChangeFp changeCb);
int RaftRemoveServer(RaftNode *pNode,
const RaftServer* pServer,
RaftChangeFp changeCb);
// transfer leader to id
typedef void (*RaftTransferFp)(RaftId id, int result);
int RaftTransfer(RaftNode *pNode,
RaftId id,
RaftTransferFp transferCb);
#ifdef __cplusplus
}
#endif
#endif /* TD_RAFT_H */
\ No newline at end of file
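/* Illustrative usage sketch of the raft API declared above (not part of the
 * header, assuming it is included as "raft.h"). Error handling is minimal and
 * the single-server cluster address is a placeholder; only the declared
 * functions and structs are used. */
#include "raft.h"

static int demoApply(struct RaftFSM *fsm, const RaftBuffer *bufs[], int nBufs) {
  (void)fsm; (void)bufs;
  return nBufs; /* a real FSM would apply each buffer to its state here */
}

int startOneRaftNode(void) {
  Raft     *pRaft = NULL;
  RaftNode *pNode = NULL;

  RaftServer servers[1] = {{.id = 1, .fqdn = "localhost", .port = 7100}};
  RaftConfiguration cluster = {.servers = servers, .nServer = 1};

  RaftFSM fsm = {0};
  fsm.apply = demoApply;

  RaftNodeOptions opts = {0};
  opts.pFSM = &fsm;
  opts.electionTimeoutMS = 1000;
  opts.heartbeatTimeoutMS = 100;

  if (RaftCreate(&pRaft) != 0) return -1;
  if (RaftStart(pRaft, 1, 0, &cluster, &opts, &pNode) != 0) {
    RaftDestroy(pRaft);
    return -1;
  }
  /* ... later: RaftApply(pNode, pBuf, applyCb) once this node is the leader ... */
  RaftStop(pNode);
  return RaftDestroy(pRaft);
}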
/*
* Copyright (c) 2019 TAOS Data, Inc. <cli@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_LIBS_SYNC_H
#define _TD_LIBS_SYNC_H
#ifdef __cplusplus
extern "C" {
#endif
#include <stdint.h>
#include "taosdef.h"
#include "wal.h"
typedef int64_t SyncNodeId;
typedef int32_t SyncGroupId;
typedef int64_t SyncIndex;
typedef uint64_t SSyncTerm;
typedef enum {
TAOS_SYNC_ROLE_FOLLOWER = 0,
TAOS_SYNC_ROLE_CANDIDATE = 1,
TAOS_SYNC_ROLE_LEADER = 2,
} ESyncRole;
typedef struct {
void* data;
size_t len;
} SSyncBuffer;
typedef struct {
uint16_t nodePort; // node sync Port
char nodeFqdn[TSDB_FQDN_LEN]; // node FQDN
} SNodeInfo;
typedef struct {
int selfIndex;
int nNode;
SNodeInfo* nodeInfo;
} SSyncCluster;
typedef struct {
int32_t selfIndex;
int nNode;
SNodeInfo* node;
ESyncRole* role;
} SNodesRole;
typedef struct SSyncFSM {
void* pData;
// apply committed log; bufs will be freed by the raft module
int (*applyLog)(struct SSyncFSM* fsm, SyncIndex index, const SSyncBuffer* buf, void* pData);
// cluster commit callback
int (*onClusterChanged)(struct SSyncFSM* fsm, const SSyncCluster* cluster, void* pData);
// fsm returns snapshot in ppBuf; bufs will be freed by the raft module
// TODO: getSnapshot SHOULD be async?
int (*getSnapshot)(struct SSyncFSM* fsm, SSyncBuffer** ppBuf, int* objId, bool* isLast);
// fsm apply snapshot with pBuf data
int (*applySnapshot)(struct SSyncFSM* fsm, SSyncBuffer* pBuf, int objId, bool isLast);
// call when restore snapshot and log done
int (*onRestoreDone)(struct SSyncFSM* fsm);
void (*onRollback)(struct SSyncFSM* fsm, SyncIndex index, const SSyncBuffer* buf);
void (*onRoleChanged)(struct SSyncFSM* fsm, const SNodesRole* pRole);
} SSyncFSM;
typedef struct SSyncServerState {
SNodeInfo voteFor;
SSyncTerm term;
} SSyncServerState;
typedef struct SStateManager {
void* pData;
void (*saveServerState)(struct SStateManager* stateMng, const SSyncServerState* state);
const SSyncServerState* (*readServerState)(struct SStateManager* stateMng);
void (*saveCluster)(struct SStateManager* stateMng, const SSyncCluster* cluster);
const SSyncCluster* (*readCluster)(struct SStateManager* stateMng);
} SStateManager;
typedef struct {
SyncGroupId vgId;
twalh walHandle;
SyncIndex snapshotIndex;
SSyncCluster syncCfg;
SSyncFSM fsm;
SStateManager stateManager;
} SSyncInfo;
int32_t syncInit();
void syncCleanUp();
SyncNodeId syncStart(const SSyncInfo*);
void syncStop(SyncNodeId);
int32_t syncPropose(SyncNodeId nodeId, SSyncBuffer buffer, void* pData, bool isWeak);
int32_t syncAddNode(SyncNodeId nodeId, const SNodeInfo *pNode);
int32_t syncRemoveNode(SyncNodeId nodeId, const SNodeInfo *pNode);
extern int32_t syncDebugFlag;
#ifdef __cplusplus
}
#endif
#endif /*_TD_LIBS_SYNC_H*/
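/* Illustrative usage sketch of the sync API declared above (not part of the
 * header, assuming it is included as "sync.h"). The FSM callback, node address
 * and vgroup id are placeholders; the WAL handle and state manager are left
 * zero-initialized for brevity. */
#include "sync.h"

static int demoApplyLog(struct SSyncFSM *pFsm, SyncIndex index,
                        const SSyncBuffer *pBuf, void *pData) {
  (void)pFsm; (void)index; (void)pBuf; (void)pData;
  return 0; /* a real vnode would apply the committed buffer here */
}

SyncNodeId startVnodeSync(SyncGroupId vgId) {
  static SNodeInfo nodes[1] = {{.nodePort = 7100, .nodeFqdn = "localhost"}};

  SSyncInfo info = {0};
  info.vgId = vgId;
  info.syncCfg.selfIndex = 0;
  info.syncCfg.nNode = 1;
  info.syncCfg.nodeInfo = nodes;
  info.fsm.applyLog = demoApplyLog;

  if (syncInit() != 0) return -1;
  return syncStart(&info); /* later: syncPropose(nodeId, buf, pData, false) */
}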
......@@ -58,9 +58,9 @@ void walStop(twalh);
void walClose(twalh);
//write
int64_t walWriteWithMsgType(twalh, int8_t msgType, void* body, int32_t bodyLen);
//int64_t walWriteWithMsgType(twalh, int8_t msgType, void* body, int32_t bodyLen);
int64_t walWrite(twalh, void* body, int32_t bodyLen);
int64_t walWriteBatch(twalh, void* body, int32_t* bodyLen, int32_t batchSize);
int64_t walWriteBatch(twalh, void** bodies, int32_t* bodyLen, int32_t batchSize);
//apis for lifecycle management
void walFsync(twalh, bool force);
......
......@@ -178,6 +178,12 @@ extern "C" {
#define setThreadName(name)
#endif
#if defined(_WIN32)
#define TD_DIRSEP "\\"
#else
#define TD_DIRSEP "/"
#endif
#ifdef __cplusplus
}
#endif
......
......@@ -65,7 +65,7 @@ void dnodeSendRedirectMsg(struct SRpcMsg *rpcMsg, bool forShell);
* @param fqdn, the fqdn of dnode.
* @param port, the port of dnode.
*/
void dnodeGetDnodeEp(int32_t dnodeId, char *ep, char *fqdn, uint16_t *port);
void dnodeGetEp(int32_t dnodeId, char *ep, char *fqdn, uint16_t *port);
#ifdef __cplusplus
}
......
......@@ -20,6 +20,8 @@
extern "C" {
#endif
typedef enum { MN_STATUS_UNINIT = 0, MN_STATUS_INIT = 1, MN_STATUS_READY = 2, MN_STATUS_CLOSING = 3 } EMnStatus;
typedef struct {
/**
* Send messages to other dnodes, such as create vnode message.
......@@ -54,13 +56,12 @@ typedef struct {
* @param port, the port of dnode.
*/
void (*GetDnodeEp)(int32_t dnodeId, char *ep, char *fqdn, uint16_t *port);
} SMnodeFp;
typedef struct {
SMnodeFp fp;
char clusterId[TSDB_CLUSTER_ID_LEN];
int32_t dnodeId;
SMnodeFp fp;
int64_t clusterId;
int32_t dnodeId;
} SMnodePara;
/**
......@@ -79,10 +80,9 @@ void mnodeCleanup();
/**
* Deploy mnode instances in dnode.
*
* @param minfos, server information used to deploy the mnode instance.
* @return Error Code.
*/
int32_t mnodeDeploy(struct SMInfos *minfos);
int32_t mnodeDeploy();
/**
* Delete the mnode instance deployed in dnode.
......@@ -94,7 +94,7 @@ void mnodeUnDeploy();
*
* @return Server status.
*/
bool mnodeIsServing();
EMnStatus mnodeGetStatus();
typedef struct {
int64_t numOfDnode;
......
......@@ -24,9 +24,10 @@ extern "C" {
typedef struct tmqMsgHead {
int32_t headLen;
int32_t msgVer;
int32_t protoVer;
int64_t cgId;
int64_t topicId;
int64_t clientId;
int32_t checksum;
int32_t msgType;
} tmqMsgHead;
......@@ -34,35 +35,43 @@ typedef struct tmqMsgHead {
//TODO: put msgs into common
typedef struct tmqConnectReq {
tmqMsgHead head;
} tmqConnectReq;
typedef struct tmqConnectResp {
tmqMsgHead head;
int8_t status;
} tmqConnectResp;
typedef struct tmqDisconnectReq {
tmqMsgHead head;
} tmqDisconnectReq;
typedef struct tmqDisconnectResp {
tmqMsgHead head;
int8_t status;
} tmqDiconnectResp;
typedef struct tmqConsumeReq {
tmqMsgHead head;
int64_t commitOffset;
} tmqConsumeReq;
typedef struct tmqConsumeResp {
tmqMsgHead head;
char content[];
} tmqConsumeResp;
typedef struct tmqSubscribeReq {
//
typedef struct tmqMnodeSubscribeReq {
tmqMsgHead head;
int64_t topicLen;
char topic[];
} tmqSubscribeReq;
typedef struct tmqSubscribeResp {
typedef struct tmqMnodeSubscribeResp {
tmqMsgHead head;
int64_t vgId;
char ep[]; //TSDB_EP_LEN
} tmqSubscribeResp;
typedef struct tmqHeartbeatReq {
......@@ -92,6 +101,24 @@ typedef struct STQ {
//value=consumeOffset: int64_t
} STQ;
#define TQ_BUFFER_SIZE 8
typedef struct tqBufferItem {
int64_t offset;
void* executor;
void* content;
} tqBufferItem;
typedef struct tqGroupHandle {
char* topic; //c style, end with '\0'
int64_t cgId;
void* ahandle;
int64_t consumeOffset;
int32_t head;
int32_t tail;
tqBufferItem buffer[TQ_BUFFER_SIZE];
} tqGroupHandle;
//init in each vnode
STQ* tqInit(void* ref_func(void*), void* unref_func(void*));
void tqCleanUp(STQ*);
......@@ -103,6 +130,17 @@ int tqCommit(STQ*);
//void* will be replace by a msg type
int tqHandleConsumeMsg(STQ*, tmqConsumeReq* msg);
tqGroupHandle* tqFindGHandleBycId(STQ*, int64_t cId);
int tqOpenTCGroup(STQ*, int64_t topicId, int64_t cgId, int64_t cId);
int tqCloseTCGroup(STQ*, int64_t topicId, int64_t cgId, int64_t cId);
int tqMoveOffsetToNext(tqGroupHandle*);
int tqResetOffset(STQ*, int64_t topicId, int64_t cgId, int64_t offset);
int tqFetchMsg(tqGroupHandle*, void*);
int tqRegisterContext(tqGroupHandle*, void*);
int tqLaunchQuery(STQ*, int64_t topicId, int64_t cgId, void* query);
int tqSendLaunchQuery(STQ*, int64_t topicId, int64_t cgId, void* query);
#ifdef __cplusplus
}
#endif
......
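/* Hypothetical sketch (not from this commit) of how the fixed-size ring in
 * tqGroupHandle could advance; the real tqMoveOffsetToNext() body is not shown
 * in this diff, so only the head/tail/TQ_BUFFER_SIZE relationship is illustrated. */
static int tqMoveOffsetToNextSketch(tqGroupHandle *pHandle) {
  if (pHandle->head == pHandle->tail) return -1;          /* nothing buffered */
  pHandle->consumeOffset = pHandle->buffer[pHandle->head].offset;
  pHandle->head = (pHandle->head + 1) % TQ_BUFFER_SIZE;   /* wrap inside the ring */
  return 0;
}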
......@@ -166,6 +166,9 @@ int32_t* taosGetErrno();
#define TSDB_CODE_MND_NO_USER_FROM_CONN TAOS_DEF_ERROR_CODE(0, 0x0354) //"Can not get user from conn")
#define TSDB_CODE_MND_TOO_MANY_USERS TAOS_DEF_ERROR_CODE(0, 0x0355) //"Too many users")
#define TSDB_CODE_MND_MNODE_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0348) //"Mnode already exists")
#define TSDB_CODE_MND_MNODE_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0349) //"Mnode not there")
#define TSDB_CODE_MND_TABLE_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0360) //"Table already exists")
#define TSDB_CODE_MND_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0361) //"Table name too long")
#define TSDB_CODE_MND_INVALID_TABLE_NAME TAOS_DEF_ERROR_CODE(0, 0x0362) //"Table does not exist")
......
......@@ -62,6 +62,7 @@ extern const int32_t TYPE_BYTES[15];
#define TSDB_DATA_NULL_STR "NULL"
#define TSDB_DATA_NULL_STR_L "null"
#define TSDB_NETTEST_USER "nettestinternal"
#define TSDB_DEFAULT_USER "root"
#ifdef _TD_POWER_
#define TSDB_DEFAULT_PASS "powerdb"
......
......@@ -20,14 +20,15 @@
extern "C" {
#endif
typedef struct SSteps SSteps;
typedef int32_t (*InitFp)();
typedef void (*CleanupFp)();
typedef void (*ReportFp)(char *name, char *desc);
struct SSteps *taosStepInit(int32_t maxsize, ReportFp fp);
int32_t taosStepExec(struct SSteps *steps);
void taosStepCleanup(struct SSteps *steps);
int32_t taosStepAdd(struct SSteps *steps, char *name, InitFp initFp, CleanupFp cleanupFp);
SSteps *taosStepInit(int32_t maxsize, ReportFp fp);
int32_t taosStepExec(SSteps *steps);
void taosStepCleanup(SSteps *steps);
int32_t taosStepAdd(SSteps *steps, char *name, InitFp initFp, CleanupFp cleanupFp);
#ifdef __cplusplus
}
......
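/* Illustrative usage of the SSteps helpers declared above (assuming the header
 * is installed as "tstep.h"); demoInit/demoCleanup/demoReport are placeholder hooks. */
#include <stdint.h>
#include <stdio.h>
#include "tstep.h"

static int32_t demoInit() { printf("demo step init\n"); return 0; }
static void    demoCleanup() { printf("demo step cleanup\n"); }
static void    demoReport(char *name, char *desc) { printf("%s: %s\n", name, desc); }

int32_t runStartupSteps() {
  SSteps *pSteps = taosStepInit(8, demoReport);
  if (pSteps == NULL) return -1;

  taosStepAdd(pSteps, "demo-step", demoInit, demoCleanup);

  int32_t code = taosStepExec(pSteps);  /* run each registered InitFp in order */
  taosStepCleanup(pSteps);              /* release the steps (and, presumably, run cleanups) */
  return code;
}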
......@@ -16,6 +16,8 @@
#ifndef _TD_UTIL_WORKER_H
#define _TD_UTIL_WORKER_H
#include "tqueue.h"
#ifdef __cplusplus
extern "C" {
#endif
......
......@@ -7,4 +7,4 @@ target_include_directories(
target_link_libraries(
taos
INTERFACE api
)
\ No newline at end of file
)
......@@ -19,3 +19,5 @@
//
//}
int taos_init() { return 0; }
void taos_cleanup(void) {}
......@@ -201,8 +201,6 @@ int8_t tscEmbedded = 0;
char tsVnodeDir[PATH_MAX] = {0};
char tsDnodeDir[PATH_MAX] = {0};
char tsMnodeDir[PATH_MAX] = {0};
char tsMnodeTmpDir[PATH_MAX] = {0};
char tsMnodeBakDir[PATH_MAX] = {0};
int32_t tsDiskCfgNum = 0;
int32_t tsTopicBianryLen = 16000;
......
add_subdirectory(transport)
add_subdirectory(raft)
add_subdirectory(sync)
add_subdirectory(tkv)
add_subdirectory(index)
add_subdirectory(wal)
......
aux_source_directory(src RAFT_SRC)
add_library(raft ${RAFT_SRC})
target_include_directories(
raft
PUBLIC "${CMAKE_SOURCE_DIR}/include/libs/raft"
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)
\ No newline at end of file
aux_source_directory(src SYNC_SRC)
add_library(sync ${SYNC_SRC})
target_link_libraries(
sync
PUBLIC common
PUBLIC util
PUBLIC wal
)
target_include_directories(
sync
PUBLIC "${CMAKE_SOURCE_DIR}/include/libs/sync"
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)
\ No newline at end of file
......@@ -11,4 +11,9 @@
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
\ No newline at end of file
*/
#include "sync.h"
int32_t syncInit() {return 0;}
void syncCleanUp() {}
\ No newline at end of file
......@@ -11,4 +11,9 @@
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
\ No newline at end of file
*/
#include "wal.h"
int32_t walInit() {return 0;}
void walCleanUp() {}
\ No newline at end of file
......@@ -5,6 +5,9 @@ target_link_libraries(
PUBLIC cjson
PUBLIC mnode
PUBLIC vnode
PUBLIC wal
PUBLIC sync
PUBLIC taos
)
target_include_directories(
dnode
......
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_DNODE_CFG_H_
#define _TD_DNODE_CFG_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "dnodeInt.h"
int32_t dnodeInitCfg();
void dnodeCleanupCfg();
void dnodeUpdateCfg(SDnodeCfg *data);
int32_t dnodeGetDnodeId();
void dnodeGetClusterId(char *clusterId);
void dnodeGetCfg(int32_t *dnodeId, char *clusterId);
void dnodeSetDropped();
#ifdef __cplusplus
}
#endif
#endif /*_TD_DNODE_CFG_H_*/
......@@ -21,7 +21,6 @@ extern "C" {
#endif
#include "dnodeInt.h"
int32_t dnodeInitCheck();
void dnodeCleanupCheck();
......
......@@ -23,9 +23,17 @@ extern "C" {
int32_t dnodeInitEps();
void dnodeCleanupEps();
void dnodeUpdateEps(SDnodeEps *data);
bool dnodeIsDnodeEpChanged(int32_t dnodeId, char *epstr);
void dnodeGetDnodeEp(int32_t dnodeId, char *epstr, char *fqdn, uint16_t *port);
void dnodeUpdateCfg(SDnodeCfg *data);
void dnodeUpdateDnodeEps(SDnodeEps *data);
void dnodeUpdateMnodeEps(SRpcEpSet *pEpSet);
int32_t dnodeGetDnodeId();
int64_t dnodeGetClusterId();
void dnodeGetEp(int32_t dnodeId, char *epstr, char *fqdn, uint16_t *port);
void dnodeGetEpSetForPeer(SRpcEpSet *epSet);
void dnodeGetEpSetForShell(SRpcEpSet *epSet);
void dnodeSendRedirectMsg(SRpcMsg *rpcMsg, bool forShell);
#ifdef __cplusplus
}
......
......@@ -19,9 +19,12 @@
#ifdef __cplusplus
extern "C" {
#endif
#include "os.h"
#include "taosmsg.h"
#include "tglobal.h"
#include "tlog.h"
#include "trpc.h"
#include "ttimer.h"
#include "dnode.h"
extern int32_t dDebugFlag;
......@@ -33,6 +36,12 @@ extern int32_t dDebugFlag;
#define dDebug(...) { if (dDebugFlag & DEBUG_DEBUG) { taosPrintLog("DND ", dDebugFlag, __VA_ARGS__); }}
#define dTrace(...) { if (dDebugFlag & DEBUG_TRACE) { taosPrintLog("DND ", dDebugFlag, __VA_ARGS__); }}
typedef enum { DN_RUN_STAT_INIT, DN_RUN_STAT_RUNNING, DN_RUN_STAT_STOPPED } EDnStat;
EDnStat dnodeGetRunStat();
void dnodeSetRunStat();
void dnodeGetStartup(SStartupStep *);
#ifdef __cplusplus
}
#endif
......
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_DNODE_MAIN_H_
#define _TD_DNODE_MAIN_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "dnodeInt.h"
typedef enum {
TD_RUN_STAT_INIT,
TD_RUN_STAT_RUNNING,
TD_RUN_STAT_STOPPED
} RunStat;
int32_t dnodeInitMain();
void dnodeCleanupMain();
int32_t dnodeInitStorage();
void dnodeCleanupStorage();
void dnodeReportStartup(char *name, char *desc);
void dnodeReportStartupFinished(char *name, char *desc);
void dnodeProcessStartupReq(SRpcMsg *pMsg);
void dnodeProcessCreateMnodeReq(SRpcMsg *pMsg);
void dnodeProcessConfigDnodeReq(SRpcMsg *pMsg);
RunStat dnodeGetRunStat();
void dnodeSetRunStat();
void* dnodeGetTimer();
#ifdef __cplusplus
}
#endif
#endif /*_TD_DNODE_MAIN_H_*/
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_DNODE_MNODE_EP_H_
#define _TD_DNODE_MNODE_EP_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "dnodeInt.h"
int32_t dnodeInitMnodeEps();
void dnodeCleanupMnodeEps();
void dnodeUpdateMnodeFromStatus(SMInfos *pMinfos);
void dnodeUpdateMnodeFromPeer(SRpcEpSet *pEpSet);
void dnodeGetEpSetForPeer(SRpcEpSet *epSet);
void dnodeGetEpSetForShell(SRpcEpSet *epSet);
void dnodeSendRedirectMsg(SRpcMsg *rpcMsg, bool forShell);
#ifdef __cplusplus
}
#endif
#endif /*_TD_DNODE_MNODE_EP_H_*/
......@@ -21,9 +21,12 @@ extern "C" {
#endif
#include "dnodeInt.h"
int32_t dnodeInitStatus();
void dnodeCleanupStatus();
int32_t dnodeInitMsg();
void dnodeCleanupMsg();
void dnodeProcessStatusRsp(SRpcMsg *pMsg);
void dnodeProcessStartupReq(SRpcMsg *pMsg);
void dnodeProcessCreateMnodeReq(SRpcMsg *pMsg);
void dnodeProcessConfigDnodeReq(SRpcMsg *pMsg);
#ifdef __cplusplus
}
......
......@@ -25,7 +25,6 @@ int32_t dnodeInitTrans();
void dnodeCleanupTrans();
void dnodeSendMsgToMnode(SRpcMsg *rpcMsg);
void dnodeSendMsgToDnode(SRpcEpSet *epSet, SRpcMsg *rpcMsg);
void dnodeSendMsgToDnodeRecv(SRpcMsg *rpcMsg, SRpcMsg *rpcRsp, SRpcEpSet *epSet);
#ifdef __cplusplus
}
......
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _DEFAULT_SOURCE
#include "os.h"
#include "cJSON.h"
#include "tglobal.h"
#include "dnodeCfg.h"
static struct DnCfg {
int32_t dnodeId;
int32_t dropped;
char clusterId[TSDB_CLUSTER_ID_LEN];
char file[PATH_MAX + 20];
pthread_mutex_t mutex;
} tsDcfg;
static int32_t dnodeReadCfg() {
int32_t len = 0;
int32_t maxLen = 200;
char * content = calloc(1, maxLen + 1);
cJSON * root = NULL;
FILE * fp = NULL;
fp = fopen(tsDcfg.file, "r");
if (!fp) {
dDebug("file %s not exist", tsDcfg.file);
goto PARSE_CFG_OVER;
}
len = (int32_t)fread(content, 1, maxLen, fp);
if (len <= 0) {
dError("failed to read %s since content is null", tsDcfg.file);
goto PARSE_CFG_OVER;
}
content[len] = 0;
root = cJSON_Parse(content);
if (root == NULL) {
dError("failed to read %s since invalid json format", tsDcfg.file);
goto PARSE_CFG_OVER;
}
cJSON *dnodeId = cJSON_GetObjectItem(root, "dnodeId");
if (!dnodeId || dnodeId->type != cJSON_Number) {
dError("failed to read %s since dnodeId not found", tsDcfg.file);
goto PARSE_CFG_OVER;
}
tsDcfg.dnodeId = (int32_t)dnodeId->valueint;
cJSON *dropped = cJSON_GetObjectItem(root, "dropped");
if (!dropped || dropped->type != cJSON_Number) {
dError("failed to read %s since dropped not found", tsDcfg.file);
goto PARSE_CFG_OVER;
}
tsDcfg.dropped = (int32_t)dropped->valueint;
cJSON *clusterId = cJSON_GetObjectItem(root, "clusterId");
if (!clusterId || clusterId->type != cJSON_String) {
dError("failed to read %s since clusterId not found", tsDcfg.file);
goto PARSE_CFG_OVER;
}
tstrncpy(tsDcfg.clusterId, clusterId->valuestring, TSDB_CLUSTER_ID_LEN);
dInfo("successed to read %s", tsDcfg.file);
PARSE_CFG_OVER:
if (content != NULL) free(content);
if (root != NULL) cJSON_Delete(root);
if (fp != NULL) fclose(fp);
terrno = 0;
return 0;
}
static int32_t dnodeWriteCfg() {
FILE *fp = fopen(tsDcfg.file, "w");
if (!fp) {
dError("failed to write %s since %s", tsDcfg.file, strerror(errno));
return -1;
}
int32_t len = 0;
int32_t maxLen = 200;
char * content = calloc(1, maxLen + 1);
len += snprintf(content + len, maxLen - len, "{\n");
len += snprintf(content + len, maxLen - len, " \"dnodeId\": %d,\n", tsDcfg.dnodeId);
len += snprintf(content + len, maxLen - len, " \"dropped\": %d,\n", tsDcfg.dropped);
len += snprintf(content + len, maxLen - len, " \"clusterId\": \"%s\"\n", tsDcfg.clusterId);
len += snprintf(content + len, maxLen - len, "}\n");
fwrite(content, 1, len, fp);
taosFsyncFile(fileno(fp));
fclose(fp);
free(content);
terrno = 0;
dInfo("successed to write %s", tsDcfg.file);
return 0;
}
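/* For reference, the dnodeCfg.json written by dnodeWriteCfg() above has the
 * following shape (field values below are illustrative only):
 *   {
 *     "dnodeId": 1,
 *     "dropped": 0,
 *     "clusterId": "d3b1c4a2e5f60718"
 *   }
 */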
int32_t dnodeInitCfg() {
tsDcfg.dnodeId = 0;
tsDcfg.dropped = 0;
tsDcfg.clusterId[0] = 0;
snprintf(tsDcfg.file, sizeof(tsDcfg.file), "%s/dnodeCfg.json", tsDnodeDir);
pthread_mutex_init(&tsDcfg.mutex, NULL);
int32_t ret = dnodeReadCfg();
if (ret == 0) {
dInfo("dnode cfg is initialized");
}
if (tsDcfg.dropped) {
dInfo("dnode is dropped and start to exit");
return -1;
}
return ret;
}
void dnodeCleanupCfg() {
pthread_mutex_destroy(&tsDcfg.mutex);
}
void dnodeUpdateCfg(SDnodeCfg *data) {
if (tsDcfg.dnodeId != 0) return;
pthread_mutex_lock(&tsDcfg.mutex);
tsDcfg.dnodeId = data->dnodeId;
tstrncpy(tsDcfg.clusterId, data->clusterId, TSDB_CLUSTER_ID_LEN);
dInfo("dnodeId is set to %d, clusterId is set to %s", data->dnodeId, data->clusterId);
dnodeWriteCfg();
pthread_mutex_unlock(&tsDcfg.mutex);
}
void dnodeSetDropped() {
pthread_mutex_lock(&tsDcfg.mutex);
tsDcfg.dropped = 1;
dnodeWriteCfg();
pthread_mutex_unlock(&tsDcfg.mutex);
}
int32_t dnodeGetDnodeId() {
int32_t dnodeId = 0;
pthread_mutex_lock(&tsDcfg.mutex);
dnodeId = tsDcfg.dnodeId;
pthread_mutex_unlock(&tsDcfg.mutex);
return dnodeId;
}
void dnodeGetClusterId(char *clusterId) {
pthread_mutex_lock(&tsDcfg.mutex);
tstrncpy(clusterId, tsDcfg.clusterId, TSDB_CLUSTER_ID_LEN);
pthread_mutex_unlock(&tsDcfg.mutex);
}
void dnodeGetCfg(int32_t *dnodeId, char *clusterId) {
pthread_mutex_lock(&tsDcfg.mutex);
*dnodeId = tsDcfg.dnodeId;
tstrncpy(clusterId, tsDcfg.clusterId, TSDB_CLUSTER_ID_LEN);
pthread_mutex_unlock(&tsDcfg.mutex);
}
......@@ -14,8 +14,6 @@
*/
#define _DEFAULT_SOURCE
#include "os.h"
#include "tglobal.h"
#include "dnodeCheck.h"
#define MIN_AVAIL_MEMORY_MB 32
......
......@@ -14,131 +14,250 @@
*/
#define _DEFAULT_SOURCE
#include "os.h"
#include "dnodeEps.h"
#include "cJSON.h"
#include "thash.h"
#include "tglobal.h"
#include "dnodeEps.h"
#include "dnodeCfg.h"
static struct {
int32_t dnodeId;
int32_t dnodeNum;
SDnodeEp * dnodeList;
SHashObj * dnodeHash;
char file[PATH_MAX + 20];
int32_t dnodeId;
int32_t dropped;
int64_t clusterId;
SDnodeEps *dnodeEps;
SHashObj *dnodeHash;
SRpcEpSet mnodeEpSetForShell;
SRpcEpSet mnodeEpSetForPeer;
char file[PATH_MAX + 20];
pthread_mutex_t mutex;
} tsDeps;
} tsEps;
void dnodeGetEpSetForPeer(SRpcEpSet *epSet) {
pthread_mutex_lock(&tsEps.mutex);
*epSet = tsEps.mnodeEpSetForPeer;
pthread_mutex_unlock(&tsEps.mutex);
}
void dnodeGetEpSetForShell(SRpcEpSet *epSet) {
pthread_mutex_lock(&tsEps.mutex);
*epSet = tsEps.mnodeEpSetForShell;
pthread_mutex_unlock(&tsEps.mutex);
}
void dnodeUpdateMnodeEps(SRpcEpSet *ep) {
if (ep == NULL || ep->numOfEps <= 0) {
dError("mnode is changed, but content is invalid, discard it");
return;
}
pthread_mutex_lock(&tsEps.mutex);
dInfo("mnode is changed, num:%d use:%d", ep->numOfEps, ep->inUse);
tsEps.mnodeEpSetForPeer = *ep;
for (int32_t i = 0; i < ep->numOfEps; ++i) {
ep->port[i] -= TSDB_PORT_DNODEDNODE;
dInfo("mnode index:%d %s:%u", i, ep->fqdn[i], ep->port[i]);
}
tsEps.mnodeEpSetForShell = *ep;
pthread_mutex_unlock(&tsEps.mutex);
}
void dnodeSendRedirectMsg(SRpcMsg *rpcMsg, bool forShell) {
SRpcConnInfo connInfo = {0};
rpcGetConnInfo(rpcMsg->handle, &connInfo);
SRpcEpSet epSet = {0};
if (forShell) {
dnodeGetEpSetForShell(&epSet);
} else {
dnodeGetEpSetForPeer(&epSet);
}
dDebug("msg:%s will be redirected, num:%d use:%d", taosMsg[rpcMsg->msgType], epSet.numOfEps, epSet.inUse);
for (int32_t i = 0; i < epSet.numOfEps; ++i) {
dDebug("mnode index:%d %s:%d", i, epSet.fqdn[i], epSet.port[i]);
if (strcmp(epSet.fqdn[i], tsLocalFqdn) == 0) {
if ((epSet.port[i] == tsServerPort + TSDB_PORT_DNODEDNODE && !forShell) ||
(epSet.port[i] == tsServerPort && forShell)) {
epSet.inUse = (i + 1) % epSet.numOfEps;
dDebug("mnode index:%d %s:%d set inUse to %d", i, epSet.fqdn[i], epSet.port[i], epSet.inUse);
}
}
epSet.port[i] = htons(epSet.port[i]);
}
rpcSendRedirectRsp(rpcMsg->handle, &epSet);
}
static void dnodePrintEps() {
dDebug("print dnodeEp, dnodeNum:%d", tsDeps.dnodeNum);
for (int32_t i = 0; i < tsDeps.dnodeNum; i++) {
SDnodeEp *ep = &tsDeps.dnodeList[i];
dDebug("dnode:%d, dnodeFqdn:%s dnodePort:%u", ep->dnodeId, ep->dnodeFqdn, ep->dnodePort);
dDebug("print dnode list, num:%d", tsEps.dnodeEps->dnodeNum);
for (int32_t i = 0; i < tsEps.dnodeEps->dnodeNum; i++) {
SDnodeEp *ep = &tsEps.dnodeEps->dnodeEps[i];
dDebug("dnode:%d, fqdn:%s port:%u isMnode:%d", ep->dnodeId, ep->dnodeFqdn, ep->dnodePort, ep->isMnode);
}
}
static void dnodeResetEps(SDnodeEps *data) {
assert(data != NULL);
if (data->dnodeNum > tsDeps.dnodeNum) {
SDnodeEp *tmp = calloc(data->dnodeNum, sizeof(SDnodeEp));
int32_t size = sizeof(SDnodeEps) + data->dnodeNum * sizeof(SDnodeEp);
if (data->dnodeNum > tsEps.dnodeEps->dnodeNum) {
SDnodeEps *tmp = calloc(1, size);
if (tmp == NULL) return;
tfree(tsDeps.dnodeList);
tsDeps.dnodeList = tmp;
tsDeps.dnodeNum = data->dnodeNum;
memcpy(tsDeps.dnodeList, data->dnodeEps, tsDeps.dnodeNum * sizeof(SDnodeEp));
dnodePrintEps();
tfree(tsEps.dnodeEps);
tsEps.dnodeEps = tmp;
}
for (int32_t i = 0; i < tsDeps.dnodeNum; ++i) {
SDnodeEp *ep = &tsDeps.dnodeList[i];
taosHashPut(tsDeps.dnodeHash, &ep->dnodeId, sizeof(int32_t), ep, sizeof(SDnodeEp));
}
if (tsEps.dnodeEps != data) {
memcpy(tsEps.dnodeEps, data, size);
}
tsEps.mnodeEpSetForPeer.inUse = 0;
tsEps.mnodeEpSetForShell.inUse = 0;
int32_t index = 0;
for (int32_t i = 0; i < tsEps.dnodeEps->dnodeNum; i++) {
SDnodeEp *ep = &tsEps.dnodeEps->dnodeEps[i];
if (!ep->isMnode) continue;
if (index >= TSDB_MAX_REPLICA) continue;
strcpy(tsEps.mnodeEpSetForShell.fqdn[index], ep->dnodeFqdn);
strcpy(tsEps.mnodeEpSetForPeer.fqdn[index], ep->dnodeFqdn);
tsEps.mnodeEpSetForShell.port[index] = ep->dnodePort;
tsEps.mnodeEpSetForPeer.port[index] = ep->dnodePort + tsDnodeDnodePort;
index++;
}
for (int32_t i = 0; i < tsEps.dnodeEps->dnodeNum; ++i) {
SDnodeEp *ep = &tsEps.dnodeEps->dnodeEps[i];
taosHashPut(tsEps.dnodeHash, &ep->dnodeId, sizeof(int32_t), ep, sizeof(SDnodeEp));
}
dnodePrintEps();
}
static bool dnodeIsDnodeEpChanged(int32_t dnodeId, char *epstr) {
bool changed = false;
pthread_mutex_lock(&tsEps.mutex);
SDnodeEp *ep = taosHashGet(tsEps.dnodeHash, &dnodeId, sizeof(int32_t));
if (ep != NULL) {
char epSaved[TSDB_EP_LEN + 1];
snprintf(epSaved, TSDB_EP_LEN, "%s:%u", ep->dnodeFqdn, ep->dnodePort);
changed = strcmp(epstr, epSaved) != 0;
tstrncpy(epstr, epSaved, TSDB_EP_LEN);
}
pthread_mutex_unlock(&tsEps.mutex);
return changed;
}
static int32_t dnodeReadEps() {
int32_t len = 0;
int32_t maxLen = 30000;
char * content = calloc(1, maxLen + 1);
cJSON * root = NULL;
FILE * fp = NULL;
char *content = calloc(1, maxLen + 1);
cJSON *root = NULL;
FILE *fp = NULL;
fp = fopen(tsDeps.file, "r");
fp = fopen(tsEps.file, "r");
if (!fp) {
dDebug("file %s not exist", tsDeps.file);
dDebug("file %s not exist", tsEps.file);
goto PRASE_EPS_OVER;
}
len = (int32_t)fread(content, 1, maxLen, fp);
if (len <= 0) {
dError("failed to read %s since content is null", tsDeps.file);
dError("failed to read %s since content is null", tsEps.file);
goto PRASE_EPS_OVER;
}
content[len] = 0;
root = cJSON_Parse(content);
if (root == NULL) {
dError("failed to read %s since invalid json format", tsDeps.file);
dError("failed to read %s since invalid json format", tsEps.file);
goto PRASE_EPS_OVER;
}
cJSON *dnodeId = cJSON_GetObjectItem(root, "dnodeId");
if (!dnodeId || dnodeId->type != cJSON_String) {
dError("failed to read %s since dnodeId not found", tsEps.file);
goto PRASE_EPS_OVER;
}
tsEps.dnodeId = atoi(dnodeId->valuestring);
cJSON *dnodeNum = cJSON_GetObjectItem(root, "dnodeNum");
if (!dnodeNum || dnodeNum->type != cJSON_Number) {
dError("failed to read %s since dnodeNum not found", tsDeps.file);
cJSON *dropped = cJSON_GetObjectItem(root, "dropped");
if (!dropped || dropped->type != cJSON_String) {
dError("failed to read %s since dropped not found", tsEps.file);
goto PRASE_EPS_OVER;
}
tsEps.dropped = atoi(dropped->valuestring);
cJSON *clusterId = cJSON_GetObjectItem(root, "clusterId");
if (!clusterId || clusterId->type != cJSON_String) {
dError("failed to read %s since clusterId not found", tsEps.file);
goto PRASE_EPS_OVER;
}
tsEps.clusterId = atoll(clusterId->valuestring);
cJSON *dnodeInfos = cJSON_GetObjectItem(root, "dnodeInfos");
if (!dnodeInfos || dnodeInfos->type != cJSON_Array) {
dError("failed to read %s since dnodeInfos not found", tsDeps.file);
dError("failed to read %s since dnodeInfos not found", tsEps.file);
goto PRASE_EPS_OVER;
}
int32_t dnodeInfosSize = cJSON_GetArraySize(dnodeInfos);
if (dnodeInfosSize != dnodeNum->valueint) {
dError("failed to read %s since dnodeInfos size:%d not matched dnodeNum:%d", tsDeps.file, dnodeInfosSize,
(int32_t)dnodeNum->valueint);
if (dnodeInfosSize <= 0) {
dError("failed to read %s since dnodeInfos size:%d invalid", tsEps.file, dnodeInfosSize);
goto PRASE_EPS_OVER;
}
tsDeps.dnodeNum = dnodeInfosSize;
tsDeps.dnodeList = calloc(dnodeInfosSize, sizeof(SDnodeEp));
if (tsDeps.dnodeList == NULL) {
tsEps.dnodeEps = calloc(1, dnodeInfosSize * sizeof(SDnodeEp) + sizeof(SDnodeEps));
if (tsEps.dnodeEps == NULL) {
dError("failed to calloc dnodeEpList since %s", strerror(errno));
goto PRASE_EPS_OVER;
}
tsEps.dnodeEps->dnodeNum = dnodeInfosSize;
for (int32_t i = 0; i < dnodeInfosSize; ++i) {
cJSON *dnodeInfo = cJSON_GetArrayItem(dnodeInfos, i);
if (dnodeInfo == NULL) break;
SDnodeEp *ep = &tsDeps.dnodeList[i];
SDnodeEp *ep = &tsEps.dnodeEps->dnodeEps[i];
cJSON *dnodeId = cJSON_GetObjectItem(dnodeInfo, "dnodeId");
if (!dnodeId || dnodeId->type != cJSON_Number) {
dError("failed to read %s, dnodeId not found", tsDeps.file);
if (!dnodeId || dnodeId->type != cJSON_String) {
dError("failed to read %s, dnodeId not found", tsEps.file);
goto PRASE_EPS_OVER;
}
ep->dnodeId = atoi(dnodeId->valuestring);
cJSON *isMnode = cJSON_GetObjectItem(dnodeInfo, "isMnode");
if (!isMnode || isMnode->type != cJSON_String) {
dError("failed to read %s, isMnode not found", tsEps.file);
goto PRASE_EPS_OVER;
}
ep->dnodeId = (int32_t)dnodeId->valueint;
ep->isMnode = atoi(isMnode->valuestring);
cJSON *dnodeFqdn = cJSON_GetObjectItem(dnodeInfo, "dnodeFqdn");
if (!dnodeFqdn || dnodeFqdn->type != cJSON_String || dnodeFqdn->valuestring == NULL) {
dError("failed to read %s, dnodeFqdn not found", tsDeps.file);
dError("failed to read %s, dnodeFqdn not found", tsEps.file);
goto PRASE_EPS_OVER;
}
tstrncpy(ep->dnodeFqdn, dnodeFqdn->valuestring, TSDB_FQDN_LEN);
cJSON *dnodePort = cJSON_GetObjectItem(dnodeInfo, "dnodePort");
if (!dnodePort || dnodePort->type != cJSON_Number) {
dError("failed to read %s, dnodePort not found", tsDeps.file);
if (!dnodePort || dnodePort->type != cJSON_String) {
dError("failed to read %s, dnodePort not found", tsEps.file);
goto PRASE_EPS_OVER;
}
ep->dnodePort = (uint16_t)dnodePort->valueint;
ep->dnodePort = atoi(dnodePort->valuestring);
}
dInfo("succcessed to read file %s", tsDeps.file);
dInfo("succcessed to read file %s", tsEps.file);
dnodePrintEps();
PRASE_EPS_OVER:
......@@ -146,35 +265,40 @@ PRASE_EPS_OVER:
if (root != NULL) cJSON_Delete(root);
if (fp != NULL) fclose(fp);
if (dnodeIsDnodeEpChanged(tsDeps.dnodeId, tsLocalEp)) {
dError("dnode:%d, localEp different from %s dnodeEps.json and need reconfigured", tsDeps.dnodeId, tsLocalEp);
if (dnodeIsDnodeEpChanged(tsEps.dnodeId, tsLocalEp)) {
dError("dnode:%d, localEp %s different with dnodeEps.json and need reconfigured", tsEps.dnodeId, tsLocalEp);
return -1;
}
dnodeResetEps(tsEps.dnodeEps);
terrno = 0;
return 0;
}
static int32_t dnodeWriteEps() {
FILE *fp = fopen(tsDeps.file, "w");
FILE *fp = fopen(tsEps.file, "w");
if (!fp) {
dError("failed to write %s since %s", tsDeps.file, strerror(errno));
dError("failed to write %s since %s", tsEps.file, strerror(errno));
return -1;
}
int32_t len = 0;
int32_t maxLen = 30000;
char * content = calloc(1, maxLen + 1);
char *content = calloc(1, maxLen + 1);
len += snprintf(content + len, maxLen - len, "{\n");
len += snprintf(content + len, maxLen - len, " \"dnodeNum\": %d,\n", tsDeps.dnodeNum);
len += snprintf(content + len, maxLen - len, " \"dnodeId\": \"%d\",\n", tsEps.dnodeId);
len += snprintf(content + len, maxLen - len, " \"dropped\": \"%d\",\n", tsEps.dropped);
len += snprintf(content + len, maxLen - len, " \"clusterId\": \"%" PRId64 "\",\n", tsEps.clusterId);
len += snprintf(content + len, maxLen - len, " \"dnodeInfos\": [{\n");
for (int32_t i = 0; i < tsDeps.dnodeNum; ++i) {
SDnodeEp *ep = &tsDeps.dnodeList[i];
len += snprintf(content + len, maxLen - len, " \"dnodeId\": %d,\n", ep->dnodeId);
for (int32_t i = 0; i < tsEps.dnodeEps->dnodeNum; ++i) {
SDnodeEp *ep = &tsEps.dnodeEps->dnodeEps[i];
len += snprintf(content + len, maxLen - len, " \"dnodeId\": \"%d\",\n", ep->dnodeId);
len += snprintf(content + len, maxLen - len, " \"isMnode\": \"%d\",\n", ep->isMnode);
len += snprintf(content + len, maxLen - len, " \"dnodeFqdn\": \"%s\",\n", ep->dnodeFqdn);
len += snprintf(content + len, maxLen - len, " \"dnodePort\": %u\n", ep->dnodePort);
if (i < tsDeps.dnodeNum - 1) {
len += snprintf(content + len, maxLen - len, " \"dnodePort\": \"%u\"\n", ep->dnodePort);
if (i < tsEps.dnodeEps->dnodeNum - 1) {
len += snprintf(content + len, maxLen - len, " },{\n");
} else {
len += snprintf(content + len, maxLen - len, " }]\n");
......@@ -188,18 +312,20 @@ static int32_t dnodeWriteEps() {
free(content);
terrno = 0;
dInfo("successed to write %s", tsDeps.file);
dInfo("successed to write %s", tsEps.file);
return 0;
}
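/* Illustration only (not part of the original source): based on the snprintf calls
   in dnodeWriteEps() above, the dnodeEps.json file it produces would look roughly
   like the sketch below; the concrete values (dnode id 1, cluster id, fqdn
   "localhost", port 6030) are hypothetical.
{
  "dnodeId": "1",
  "dropped": "0",
  "clusterId": "1234567890",
  "dnodeInfos": [{
    "dnodeId": "1",
    "isMnode": "1",
    "dnodeFqdn": "localhost",
    "dnodePort": "6030"
  }]
}
*/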
int32_t dnodeInitEps() {
tsDeps.dnodeHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
if (tsDeps.dnodeHash == NULL) return -1;
tsEps.dnodeId = 0;
tsEps.dropped = 0;
tsEps.clusterId = 0;
tsEps.dnodeEps = NULL;
snprintf(tsEps.file, sizeof(tsEps.file), "%s/dnodeEps.json", tsDnodeDir);
pthread_mutex_init(&tsEps.mutex, NULL);
tsDeps.dnodeId = dnodeGetDnodeId();
tsDeps.dnodeNum = 0;
snprintf(tsDeps.file, sizeof(tsDeps.file), "%s/dnodeEps.json", tsDnodeDir);
pthread_mutex_init(&tsDeps.mutex, NULL);
tsEps.dnodeHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
if (tsEps.dnodeHash == NULL) return -1;
int32_t ret = dnodeReadEps();
if (ret == 0) {
......@@ -210,75 +336,80 @@ int32_t dnodeInitEps() {
}
void dnodeCleanupEps() {
pthread_mutex_lock(&tsDeps.mutex);
pthread_mutex_lock(&tsEps.mutex);
if (tsDeps.dnodeList != NULL) {
free(tsDeps.dnodeList);
tsDeps.dnodeList = NULL;
if (tsEps.dnodeEps != NULL) {
free(tsEps.dnodeEps);
tsEps.dnodeEps = NULL;
}
if (tsDeps.dnodeHash) {
taosHashCleanup(tsDeps.dnodeHash);
tsDeps.dnodeHash = NULL;
if (tsEps.dnodeHash) {
taosHashCleanup(tsEps.dnodeHash);
tsEps.dnodeHash = NULL;
}
tsDeps.dnodeNum = 0;
pthread_mutex_unlock(&tsDeps.mutex);
pthread_mutex_destroy(&tsDeps.mutex);
pthread_mutex_unlock(&tsEps.mutex);
pthread_mutex_destroy(&tsEps.mutex);
}
void dnodeUpdateEps(SDnodeEps *data) {
void dnodeUpdateDnodeEps(SDnodeEps *data) {
if (data == NULL || data->dnodeNum <= 0) return;
data->dnodeNum = htonl(data->dnodeNum);
for (int32_t i = 0; i < data->dnodeNum; ++i) {
data->dnodeEps[i].dnodeId = htonl(data->dnodeEps[i].dnodeId);
data->dnodeEps[i].dnodePort = htons(data->dnodeEps[i].dnodePort);
}
pthread_mutex_lock(&tsEps.mutex);
pthread_mutex_lock(&tsDeps.mutex);
if (data->dnodeNum != tsDeps.dnodeNum) {
if (data->dnodeNum != tsEps.dnodeEps->dnodeNum) {
dnodeResetEps(data);
dnodeWriteEps();
} else {
int32_t size = data->dnodeNum * sizeof(SDnodeEp);
if (memcmp(tsDeps.dnodeList, data->dnodeEps, size) != 0) {
int32_t size = data->dnodeNum * sizeof(SDnodeEp) + sizeof(SDnodeEps);
if (memcmp(tsEps.dnodeEps, data, size) != 0) {
dnodeResetEps(data);
dnodeWriteEps();
}
}
pthread_mutex_unlock(&tsDeps.mutex);
pthread_mutex_unlock(&tsEps.mutex);
}
bool dnodeIsDnodeEpChanged(int32_t dnodeId, char *epstr) {
bool changed = false;
pthread_mutex_lock(&tsDeps.mutex);
void dnodeGetEp(int32_t dnodeId, char *epstr, char *fqdn, uint16_t *port) {
pthread_mutex_lock(&tsEps.mutex);
SDnodeEp *ep = taosHashGet(tsDeps.dnodeHash, &dnodeId, sizeof(int32_t));
SDnodeEp *ep = taosHashGet(tsEps.dnodeHash, &dnodeId, sizeof(int32_t));
if (ep != NULL) {
char epSaved[TSDB_EP_LEN + 1];
snprintf(epSaved, TSDB_EP_LEN, "%s:%u", ep->dnodeFqdn, ep->dnodePort);
changed = strcmp(epstr, epSaved) != 0;
tstrncpy(epstr, epSaved, TSDB_EP_LEN);
if (port) *port = ep->dnodePort;
if (fqdn) tstrncpy(fqdn, ep->dnodeFqdn, TSDB_FQDN_LEN);
if (epstr) snprintf(epstr, TSDB_EP_LEN, "%s:%u", ep->dnodeFqdn, ep->dnodePort);
}
pthread_mutex_unlock(&tsDeps.mutex);
return changed;
pthread_mutex_unlock(&tsEps.mutex);
}
void dnodeGetDnodeEp(int32_t dnodeId, char *epstr, char *fqdn, uint16_t *port) {
pthread_mutex_lock(&tsDeps.mutex);
void dnodeUpdateCfg(SDnodeCfg *data) {
if (tsEps.dnodeId != 0 && !data->dropped) return;
SDnodeEp *ep = taosHashGet(tsDeps.dnodeHash, &dnodeId, sizeof(int32_t));
if (ep != NULL) {
if (port) *port = ep->dnodePort;
if (fqdn) tstrncpy(fqdn, ep->dnodeFqdn, TSDB_FQDN_LEN);
if (epstr) snprintf(epstr, TSDB_EP_LEN, "%s:%u", ep->dnodeFqdn, ep->dnodePort);
}
pthread_mutex_lock(&tsEps.mutex);
tsEps.dnodeId = data->dnodeId;
tsEps.clusterId = data->clusterId;
tsEps.dropped = data->dropped;
dInfo("dnodeId is set to %d, clusterId is set to %" PRId64, data->dnodeId, data->clusterId);
dnodeWriteEps();
pthread_mutex_unlock(&tsEps.mutex);
}
int32_t dnodeGetDnodeId() {
int32_t dnodeId = 0;
pthread_mutex_lock(&tsEps.mutex);
dnodeId = tsEps.dnodeId;
pthread_mutex_unlock(&tsEps.mutex);
return dnodeId;
}
pthread_mutex_unlock(&tsDeps.mutex);
int64_t dnodeGetClusterId() {
int64_t clusterId = 0;
pthread_mutex_lock(&tsEps.mutex);
clusterId = tsEps.clusterId;
pthread_mutex_unlock(&tsEps.mutex);
return clusterId;
}
......@@ -14,71 +14,178 @@
*/
#define _DEFAULT_SOURCE
#include "os.h"
#if 0
#include "qScript.h"
#include "tfile.h"
#include "tsync.h"
#include "twal.h"
#endif
#include "tstep.h"
#include "dnodeCfg.h"
#include "dnodeCheck.h"
#include "dnodeEps.h"
#include "dnodeMain.h"
#include "dnodeMnodeEps.h"
#include "dnodeStatus.h"
#include "dnodeTelem.h"
#include "dnodeMsg.h"
#include "dnodeTrans.h"
#include "mnode.h"
#include "sync.h"
#include "tcache.h"
#include "tconfig.h"
#include "tnote.h"
#include "tstep.h"
#include "vnode.h"
#include "wal.h"
static struct {
EDnStat runStatus;
SStartupStep startup;
SSteps *steps;
} tsDnode;
EDnStat dnodeGetRunStat() { return tsDnode.runStatus; }
void dnodeSetRunStat(EDnStat stat) { tsDnode.runStatus = stat; }
static void dnodeReportStartup(char *name, char *desc) {
SStartupStep *startup = &tsDnode.startup;
tstrncpy(startup->name, name, sizeof(startup->name));
tstrncpy(startup->desc, desc, sizeof(startup->desc));
startup->finished = 0;
}
static void dnodeReportStartupFinished(char *name, char *desc) {
SStartupStep *startup = &tsDnode.startup;
tstrncpy(startup->name, name, sizeof(startup->name));
tstrncpy(startup->desc, desc, sizeof(startup->desc));
startup->finished = 1;
}
static struct SSteps *tsSteps;
void dnodeGetStartup(SStartupStep *pStep) { memcpy(pStep, &tsDnode.startup, sizeof(SStartupStep)); }
static int32_t dnodeInitVnodeModule(void **unused) {
static int32_t dnodeInitVnode() {
SVnodePara para;
para.fp.GetDnodeEp = dnodeGetDnodeEp;
para.fp.GetDnodeEp = dnodeGetEp;
para.fp.SendMsgToDnode = dnodeSendMsgToDnode;
para.fp.SendMsgToMnode = dnodeSendMsgToMnode;
return vnodeInit(para);
}
static int32_t dnodeInitMnodeModule(void **unused) {
static int32_t dnodeInitMnode() {
SMnodePara para;
para.fp.GetDnodeEp = dnodeGetDnodeEp;
para.fp.GetDnodeEp = dnodeGetEp;
para.fp.SendMsgToDnode = dnodeSendMsgToDnode;
para.fp.SendMsgToMnode = dnodeSendMsgToMnode;
para.fp.SendRedirectMsg = dnodeSendRedirectMsg;
dnodeGetCfg(&para.dnodeId, para.clusterId);
para.dnodeId = dnodeGetDnodeId();
para.clusterId = dnodeGetClusterId();
return mnodeInit(para);
}
static int32_t dnodeInitTfs() { return 0; }
static int32_t dnodeInitMain() {
tsDnode.runStatus = DN_RUN_STAT_STOPPED;
tscEmbedded = 1;
taosIgnSIGPIPE();
taosBlockSIGPIPE();
taosResolveCRC();
taosInitGlobalCfg();
taosReadGlobalLogCfg();
taosSetCoreDump(tsEnableCoreFile);
if (!taosMkDir(tsLogDir)) {
printf("failed to create dir: %s, reason: %s\n", tsLogDir, strerror(errno));
return -1;
}
char temp[TSDB_FILENAME_LEN];
sprintf(temp, "%s/taosdlog", tsLogDir);
if (taosInitLog(temp, tsNumOfLogLines, 1) < 0) {
printf("failed to init log file\n");
}
if (!taosReadGlobalCfg()) {
taosPrintGlobalCfg();
dError("TDengine read global config failed");
return -1;
}
dInfo("start to initialize TDengine");
taosInitNotes();
return taosCheckGlobalCfg();
}
static void dnodeCleanupMain() {
taos_cleanup();
taosCloseLog();
taosStopCacheRefreshWorker();
}
static int32_t dnodeCheckRunning(char *dir) {
char filepath[256] = {0};
snprintf(filepath, sizeof(filepath), "%s/.running", dir);
FileFd fd = taosOpenFileCreateWriteTrunc(filepath);
if (fd < 0) {
dError("failed to open lock file:%s since %s, quit", filepath, strerror(errno));
return -1;
}
int32_t ret = taosLockFile(fd);
if (ret != 0) {
dError("failed to lock file:%s since %s, quit", filepath, strerror(errno));
taosCloseFile(fd);
return -1;
}
return 0;
}
static int32_t dnodeInitDir() {
sprintf(tsMnodeDir, "%s/mnode", tsDataDir);
sprintf(tsVnodeDir, "%s/vnode", tsDataDir);
sprintf(tsDnodeDir, "%s/dnode", tsDataDir);
if (!taosMkDir(tsDnodeDir)) {
dError("failed to create dir:%s since %s", tsDnodeDir, strerror(errno));
return -1;
}
if (!taosMkDir(tsMnodeDir)) {
dError("failed to create dir:%s since %s", tsMnodeDir, strerror(errno));
return -1;
}
if (!taosMkDir(tsVnodeDir)) {
dError("failed to create dir:%s since %s", tsVnodeDir, strerror(errno));
return -1;
}
if (dnodeCheckRunning(tsDnodeDir) != 0) {
return -1;
}
return 0;
}
static void dnodeCleanupDir() {}
int32_t dnodeInit() {
tsSteps = taosStepInit(24, dnodeReportStartup);
if (tsSteps == NULL) return -1;
taosStepAdd(tsSteps, "dnode-main", dnodeInitMain, dnodeCleanupMain);
taosStepAdd(tsSteps, "dnode-storage", dnodeInitStorage, dnodeCleanupStorage);
//taosStepAdd(tsSteps, "dnode-tfs", tfInit, tfCleanup);
taosStepAdd(tsSteps, "dnode-rpc", rpcInit, rpcCleanup);
taosStepAdd(tsSteps, "dnode-check", dnodeInitCheck, dnodeCleanupCheck);
taosStepAdd(tsSteps, "dnode-cfg", dnodeInitCfg, dnodeCleanupCfg);
taosStepAdd(tsSteps, "dnode-deps", dnodeInitEps, dnodeCleanupEps);
taosStepAdd(tsSteps, "dnode-meps", dnodeInitMnodeEps, dnodeCleanupMnodeEps);
//taosStepAdd(tsSteps, "dnode-wal", walInit, walCleanUp);
//taosStepAdd(tsSteps, "dnode-sync", syncInit, syncCleanUp);
taosStepAdd(tsSteps, "dnode-vnode", dnodeInitVnodeModule, vnodeCleanup);
taosStepAdd(tsSteps, "dnode-mnode", dnodeInitMnodeModule, mnodeCleanup);
taosStepAdd(tsSteps, "dnode-trans", dnodeInitTrans, dnodeCleanupTrans);
taosStepAdd(tsSteps, "dnode-status", dnodeInitStatus, dnodeCleanupStatus);
taosStepAdd(tsSteps, "dnode-telem", dnodeInitTelem, dnodeCleanupTelem);
//taosStepAdd(tsSteps, "dnode-script",scriptEnvPoolInit, scriptEnvPoolCleanup);
taosStepExec(tsSteps);
dnodeSetRunStat(TD_RUN_STAT_RUNNING);
SSteps *steps = taosStepInit(24, dnodeReportStartup);
if (steps == NULL) return -1;
taosStepAdd(steps, "dnode-main", dnodeInitMain, dnodeCleanupMain);
taosStepAdd(steps, "dnode-dir", dnodeInitDir, dnodeCleanupDir);
taosStepAdd(steps, "dnode-check", dnodeInitCheck, dnodeCleanupCheck);
taosStepAdd(steps, "dnode-rpc", rpcInit, rpcCleanup);
taosStepAdd(steps, "dnode-tfs", dnodeInitTfs, NULL);
taosStepAdd(steps, "dnode-wal", walInit, walCleanUp);
taosStepAdd(steps, "dnode-sync", syncInit, syncCleanUp);
taosStepAdd(steps, "dnode-eps", dnodeInitEps, dnodeCleanupEps);
taosStepAdd(steps, "dnode-vnode", dnodeInitVnode, vnodeCleanup);
taosStepAdd(steps, "dnode-mnode", dnodeInitMnode, mnodeCleanup);
taosStepAdd(steps, "dnode-trans", dnodeInitTrans, dnodeCleanupTrans);
taosStepAdd(steps, "dnode-msg", dnodeInitMsg, dnodeCleanupMsg);
tsDnode.steps = steps;
taosStepExec(tsDnode.steps);
dnodeSetRunStat(DN_RUN_STAT_RUNNING);
dnodeReportStartupFinished("TDengine", "initialized successfully");
dInfo("TDengine is initialized successfully");
......@@ -86,9 +193,9 @@ int32_t dnodeInit() {
}
void dnodeCleanup() {
if (dnodeGetRunStat() != TD_RUN_STAT_STOPPED) {
dnodeSetRunStat(TD_RUN_STAT_STOPPED);
taosStepCleanup(tsSteps);
tsSteps = NULL;
if (dnodeGetRunStat() != DN_RUN_STAT_STOPPED) {
dnodeSetRunStat(DN_RUN_STAT_STOPPED);
taosStepCleanup(tsDnode.steps);
tsDnode.steps = NULL;
}
}
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _DEFAULT_SOURCE
#include "os.h"
#include "tcache.h"
#include "tconfig.h"
#include "tglobal.h"
#if 0
#include "tfs.h"
#endif
#include "tnote.h"
#include "tcompression.h"
#include "ttimer.h"
#include "dnodeCfg.h"
#include "dnodeMain.h"
#include "mnode.h"
static struct {
RunStat runStatus;
void * dnodeTimer;
SStartupStep startup;
} tsDmain;
static void dnodeCheckDataDirOpenned(char *dir) {
#if 0
char filepath[256] = {0};
snprintf(filepath, sizeof(filepath), "%s/.running", dir);
int32_t fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU | S_IRWXG | S_IRWXO);
if (fd < 0) {
dError("failed to open lock file:%s, reason: %s, quit", filepath, strerror(errno));
exit(0);
}
int32_t ret = flock(fd, LOCK_EX | LOCK_NB);
if (ret != 0) {
dError("failed to lock file:%s ret:%d since %s, database may be running, quit", filepath, ret, strerror(errno));
close(fd);
exit(0);
}
#endif
}
int32_t dnodeInitMain() {
tsDmain.runStatus = TD_RUN_STAT_STOPPED;
tsDmain.dnodeTimer = taosTmrInit(100, 200, 60000, "DND-TMR");
if (tsDmain.dnodeTimer == NULL) {
dError("failed to init dnode timer");
return -1;
}
tscEmbedded = 1;
taosIgnSIGPIPE();
taosBlockSIGPIPE();
taosResolveCRC();
taosInitGlobalCfg();
taosReadGlobalLogCfg();
taosSetCoreDump(tsEnableCoreFile);
if (!taosMkDir(tsLogDir)) {
printf("failed to create dir: %s, reason: %s\n", tsLogDir, strerror(errno));
return -1;
}
char temp[TSDB_FILENAME_LEN];
sprintf(temp, "%s/taosdlog", tsLogDir);
if (taosInitLog(temp, tsNumOfLogLines, 1) < 0) {
printf("failed to init log file\n");
}
if (!taosReadGlobalCfg()) {
taosPrintGlobalCfg();
dError("TDengine read global config failed");
return -1;
}
dInfo("start to initialize TDengine");
taosInitNotes();
return taosCheckGlobalCfg();
}
void dnodeCleanupMain() {
if (tsDmain.dnodeTimer != NULL) {
taosTmrCleanUp(tsDmain.dnodeTimer);
tsDmain.dnodeTimer = NULL;
}
#if 0
taos_cleanup();
#endif
taosCloseLog();
taosStopCacheRefreshWorker();
}
int32_t dnodeInitStorage() {
#ifdef TD_TSZ
// compress module init
tsCompressInit();
#endif
// storage module init
if (tsDiskCfgNum == 1 && !taosMkDir(tsDataDir)) {
dError("failed to create dir:%s since %s", tsDataDir, strerror(errno));
return -1;
}
#if 0
if (tfsInit(tsDiskCfg, tsDiskCfgNum) < 0) {
dError("failed to init TFS since %s", tstrerror(terrno));
return -1;
}
strncpy(tsDataDir, TFS_PRIMARY_PATH(), TSDB_FILENAME_LEN);
#endif
sprintf(tsMnodeDir, "%s/mnode", tsDataDir);
sprintf(tsVnodeDir, "%s/vnode", tsDataDir);
sprintf(tsDnodeDir, "%s/dnode", tsDataDir);
if (!taosMkDir(tsMnodeDir)) {
dError("failed to create dir:%s since %s", tsMnodeDir, strerror(errno));
return -1;
}
if (!taosMkDir(tsDnodeDir)) {
dError("failed to create dir:%s since %s", tsDnodeDir, strerror(errno));
return -1;
}
#if 0
if (tfsMkdir("vnode") < 0) {
dError("failed to create vnode dir since %s", tstrerror(terrno));
return -1;
}
if (tfsMkdir("vnode_bak") < 0) {
dError("failed to create vnode_bak dir since %s", tstrerror(terrno));
return -1;
}
TDIR *tdir = tfsOpendir("vnode_bak/.staging");
bool stagingNotEmpty = tfsReaddir(tdir) != NULL;
tfsClosedir(tdir);
if (stagingNotEmpty) {
dError("vnode_bak/.staging dir not empty, fix it first.");
return -1;
}
if (tfsMkdir("vnode_bak/.staging") < 0) {
dError("failed to create vnode_bak/.staging dir since %s", tstrerror(terrno));
return -1;
}
dnodeCheckDataDirOpenned(tsDnodeDir);
taosGetDisk();
dnodePrintDiskInfo();
#endif
dInfo("dnode storage is initialized at %s", tsDnodeDir);
return 0;
}
void dnodeCleanupStorage() {
#if 0
// storage destroy
tfsDestroy();
#ifdef TD_TSZ
// compress destroy
tsCompressExit();
#endif
#endif
}
void dnodeReportStartup(char *name, char *desc) {
SStartupStep *startup = &tsDmain.startup;
tstrncpy(startup->name, name, sizeof(startup->name));
tstrncpy(startup->desc, desc, sizeof(startup->desc));
startup->finished = 0;
}
void dnodeReportStartupFinished(char *name, char *desc) {
SStartupStep *startup = &tsDmain.startup;
tstrncpy(startup->name, name, sizeof(startup->name));
tstrncpy(startup->desc, desc, sizeof(startup->desc));
startup->finished = 1;
}
void dnodeProcessStartupReq(SRpcMsg *pMsg) {
dInfo("startup msg is received, cont:%s", (char *)pMsg->pCont);
SStartupStep *pStep = rpcMallocCont(sizeof(SStartupStep));
memcpy(pStep, &tsDmain.startup, sizeof(SStartupStep));
dDebug("startup msg is sent, step:%s desc:%s finished:%d", pStep->name, pStep->desc, pStep->finished);
SRpcMsg rpcRsp = {.handle = pMsg->handle, .pCont = pStep, .contLen = sizeof(SStartupStep)};
rpcSendResponse(&rpcRsp);
rpcFreeCont(pMsg->pCont);
}
static int32_t dnodeStartMnode(SRpcMsg *pMsg) {
SCreateMnodeMsg *pCfg = pMsg->pCont;
pCfg->dnodeId = htonl(pCfg->dnodeId);
if (pCfg->dnodeId != dnodeGetDnodeId()) {
dDebug("dnode:%d, in create meps msg is not equal with saved dnodeId:%d", pCfg->dnodeId,
dnodeGetDnodeId());
return TSDB_CODE_MND_DNODE_ID_NOT_CONFIGURED;
}
if (strcmp(pCfg->dnodeEp, tsLocalEp) != 0) {
dDebug("dnodeEp:%s, in create meps msg is not equal with saved dnodeEp:%s", pCfg->dnodeEp, tsLocalEp);
return TSDB_CODE_MND_DNODE_EP_NOT_CONFIGURED;
}
dDebug("dnode:%d, create meps msg is received from mnodes, numOfMnodes:%d", pCfg->dnodeId, pCfg->mnodes.mnodeNum);
for (int32_t i = 0; i < pCfg->mnodes.mnodeNum; ++i) {
pCfg->mnodes.mnodeInfos[i].mnodeId = htonl(pCfg->mnodes.mnodeInfos[i].mnodeId);
dDebug("meps index:%d, meps:%d:%s", i, pCfg->mnodes.mnodeInfos[i].mnodeId, pCfg->mnodes.mnodeInfos[i].mnodeEp);
}
if (mnodeIsServing()) return 0;
return mnodeDeploy(&pCfg->mnodes);
}
void dnodeProcessCreateMnodeReq(SRpcMsg *pMsg) {
int32_t code = dnodeStartMnode(pMsg);
SRpcMsg rspMsg = {.handle = pMsg->handle, .pCont = NULL, .contLen = 0, .code = code};
rpcSendResponse(&rspMsg);
rpcFreeCont(pMsg->pCont);
}
void dnodeProcessConfigDnodeReq(SRpcMsg *pMsg) {
SCfgDnodeMsg *pCfg = pMsg->pCont;
int32_t code = taosCfgDynamicOptions(pCfg->config);
SRpcMsg rspMsg = {.handle = pMsg->handle, .pCont = NULL, .contLen = 0, .code = code};
rpcSendResponse(&rspMsg);
rpcFreeCont(pMsg->pCont);
}
RunStat dnodeGetRunStat() { return tsDmain.runStatus; }
void dnodeSetRunStat(RunStat stat) { tsDmain.runStatus = stat; }
void* dnodeGetTimer() { return tsDmain.dnodeTimer; }
\ No newline at end of file
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _DEFAULT_SOURCE
#include "os.h"
#include "cJSON.h"
#include "tglobal.h"
#include "dnodeCfg.h"
#include "dnodeEps.h"
#include "dnodeMnodeEps.h"
#include "mnode.h"
static struct {
SRpcEpSet mnodeEpSet;
SMInfos mnodeInfos;
char file[PATH_MAX + 20];
pthread_mutex_t mutex;
} tsDmeps;
static void dnodePrintMnodeEps() {
SRpcEpSet *epset = &tsDmeps.mnodeEpSet;
dInfo("print mnode eps, num:%d inuse:%d", epset->numOfEps, epset->inUse);
for (int32_t i = 0; i < epset->numOfEps; i++) {
dInfo("ep index:%d, %s:%u", i, epset->fqdn[i], epset->port[i]);
}
}
static void dnodeResetMnodeEps(SMInfos *mInfos) {
if (mInfos == NULL || mInfos->mnodeNum == 0) {
tsDmeps.mnodeEpSet.numOfEps = 1;
taosGetFqdnPortFromEp(tsFirst, tsDmeps.mnodeEpSet.fqdn[0], &tsDmeps.mnodeEpSet.port[0]);
if (strcmp(tsSecond, tsFirst) != 0) {
tsDmeps.mnodeEpSet.numOfEps = 2;
taosGetFqdnPortFromEp(tsSecond, tsDmeps.mnodeEpSet.fqdn[1], &tsDmeps.mnodeEpSet.port[1]);
}
dnodePrintMnodeEps();
return;
}
int32_t size = sizeof(SMInfos);
memcpy(&tsDmeps.mnodeInfos, mInfos, size);
tsDmeps.mnodeEpSet.inUse = tsDmeps.mnodeInfos.inUse;
tsDmeps.mnodeEpSet.numOfEps = tsDmeps.mnodeInfos.mnodeNum;
for (int32_t i = 0; i < tsDmeps.mnodeInfos.mnodeNum; i++) {
taosGetFqdnPortFromEp(tsDmeps.mnodeInfos.mnodeInfos[i].mnodeEp, tsDmeps.mnodeEpSet.fqdn[i], &tsDmeps.mnodeEpSet.port[i]);
}
dnodePrintMnodeEps();
}
static int32_t dnodeWriteMnodeEps() {
FILE *fp = fopen(tsDmeps.file, "w");
if (!fp) {
dError("failed to write %s since %s", tsDmeps.file, strerror(errno));
return -1;
}
int32_t len = 0;
int32_t maxLen = 2000;
char * content = calloc(1, maxLen + 1);
len += snprintf(content + len, maxLen - len, "{\n");
len += snprintf(content + len, maxLen - len, " \"inUse\": %d,\n", tsDmeps.mnodeInfos.inUse);
len += snprintf(content + len, maxLen - len, " \"nodeNum\": %d,\n", tsDmeps.mnodeInfos.mnodeNum);
len += snprintf(content + len, maxLen - len, " \"nodeInfos\": [{\n");
for (int32_t i = 0; i < tsDmeps.mnodeInfos.mnodeNum; i++) {
len += snprintf(content + len, maxLen - len, " \"nodeId\": %d,\n", tsDmeps.mnodeInfos.mnodeInfos[i].mnodeId);
len += snprintf(content + len, maxLen - len, " \"nodeEp\": \"%s\"\n", tsDmeps.mnodeInfos.mnodeInfos[i].mnodeEp);
if (i < tsDmeps.mnodeInfos.mnodeNum - 1) {
len += snprintf(content + len, maxLen - len, " },{\n");
} else {
len += snprintf(content + len, maxLen - len, " }]\n");
}
}
len += snprintf(content + len, maxLen - len, "}\n");
fwrite(content, 1, len, fp);
taosFsyncFile(fileno(fp));
fclose(fp);
free(content);
terrno = 0;
dInfo("successed to write %s", tsDmeps.file);
return 0;
}
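/* Illustration only (not part of the original source): given the snprintf calls in
   dnodeWriteMnodeEps() above, the mnodeEpSet.json file would look roughly like the
   sketch below; the node id and endpoint values are hypothetical.
{
  "inUse": 0,
  "nodeNum": 1,
  "nodeInfos": [{
    "nodeId": 1,
    "nodeEp": "localhost:6030"
  }]
}
*/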
static int32_t dnodeReadMnodeEps() {
int32_t len = 0;
int32_t maxLen = 2000;
char * content = calloc(1, maxLen + 1);
cJSON * root = NULL;
FILE * fp = NULL;
SMInfos mInfos = {0};
bool nodeChanged = false;
fp = fopen(tsDmeps.file, "r");
if (!fp) {
dDebug("file %s not exist", tsDmeps.file);
goto PARSE_MINFOS_OVER;
}
len = (int32_t)fread(content, 1, maxLen, fp);
if (len <= 0) {
dError("failed to read %s since content is null", tsDmeps.file);
goto PARSE_MINFOS_OVER;
}
content[len] = 0;
root = cJSON_Parse(content);
if (root == NULL) {
dError("failed to read %s since invalid json format", tsDmeps.file);
goto PARSE_MINFOS_OVER;
}
cJSON *inUse = cJSON_GetObjectItem(root, "inUse");
if (!inUse || inUse->type != cJSON_Number) {
dError("failed to read mnodeEpSet.json since inUse not found");
goto PARSE_MINFOS_OVER;
}
tsDmeps.mnodeInfos.inUse = (int8_t)inUse->valueint;
cJSON *nodeNum = cJSON_GetObjectItem(root, "nodeNum");
if (!nodeNum || nodeNum->type != cJSON_Number) {
dError("failed to read mnodeEpSet.json since nodeNum not found");
goto PARSE_MINFOS_OVER;
}
mInfos.mnodeNum = (int8_t)nodeNum->valueint;
cJSON *nodeInfos = cJSON_GetObjectItem(root, "nodeInfos");
if (!nodeInfos || nodeInfos->type != cJSON_Array) {
dError("failed to read mnodeEpSet.json since nodeInfos not found");
goto PARSE_MINFOS_OVER;
}
int32_t size = cJSON_GetArraySize(nodeInfos);
if (size != mInfos.mnodeNum) {
dError("failed to read mnodeEpSet.json since nodeInfos size not matched");
goto PARSE_MINFOS_OVER;
}
for (int32_t i = 0; i < size; ++i) {
cJSON *nodeInfo = cJSON_GetArrayItem(nodeInfos, i);
if (nodeInfo == NULL) continue;
cJSON *nodeId = cJSON_GetObjectItem(nodeInfo, "nodeId");
if (!nodeId || nodeId->type != cJSON_Number) {
dError("failed to read mnodeEpSet.json since nodeId not found");
goto PARSE_MINFOS_OVER;
}
cJSON *nodeEp = cJSON_GetObjectItem(nodeInfo, "nodeEp");
if (!nodeEp || nodeEp->type != cJSON_String || nodeEp->valuestring == NULL) {
dError("failed to read mnodeEpSet.json since nodeName not found");
goto PARSE_MINFOS_OVER;
}
SMInfo *mInfo = &mInfos.mnodeInfos[i];
mInfo->mnodeId = (int32_t)nodeId->valueint;
tstrncpy(mInfo->mnodeEp, nodeEp->valuestring, TSDB_EP_LEN);
bool changed = dnodeIsDnodeEpChanged(mInfo->mnodeId, mInfo->mnodeEp);
if (changed) nodeChanged = changed;
}
dInfo("successed to read file %s", tsDmeps.file);
PARSE_MINFOS_OVER:
if (content != NULL) free(content);
if (root != NULL) cJSON_Delete(root);
if (fp != NULL) fclose(fp);
terrno = 0;
for (int32_t i = 0; i < mInfos.mnodeNum; ++i) {
SMInfo *mInfo = &mInfos.mnodeInfos[i];
dnodeGetDnodeEp(mInfo->mnodeId, mInfo->mnodeEp, NULL, NULL);
}
dnodeResetMnodeEps(&mInfos);
if (nodeChanged) {
dnodeWriteMnodeEps();
}
return 0;
}
void dnodeSendRedirectMsg(SRpcMsg *rpcMsg, bool forShell) {
SRpcConnInfo connInfo = {0};
rpcGetConnInfo(rpcMsg->handle, &connInfo);
SRpcEpSet epSet = {0};
if (forShell) {
dnodeGetEpSetForShell(&epSet);
} else {
dnodeGetEpSetForPeer(&epSet);
}
dDebug("msg:%s will be redirected, dnodeIp:%s user:%s, numOfEps:%d inUse:%d", taosMsg[rpcMsg->msgType],
taosIpStr(connInfo.clientIp), connInfo.user, epSet.numOfEps, epSet.inUse);
for (int32_t i = 0; i < epSet.numOfEps; ++i) {
dDebug("mnode index:%d %s:%d", i, epSet.fqdn[i], epSet.port[i]);
if (strcmp(epSet.fqdn[i], tsLocalFqdn) == 0) {
if ((epSet.port[i] == tsServerPort + TSDB_PORT_DNODEDNODE && !forShell) ||
(epSet.port[i] == tsServerPort && forShell)) {
epSet.inUse = (i + 1) % epSet.numOfEps;
dDebug("mnode index:%d %s:%d set inUse to %d", i, epSet.fqdn[i], epSet.port[i], epSet.inUse);
}
}
epSet.port[i] = htons(epSet.port[i]);
}
rpcSendRedirectRsp(rpcMsg->handle, &epSet);
}
int32_t dnodeInitMnodeEps() {
snprintf(tsDmeps.file, sizeof(tsDmeps.file), "%s/mnodeEpSet.json", tsDnodeDir);
pthread_mutex_init(&tsDmeps.mutex, NULL);
dnodeResetMnodeEps(NULL);
int32_t ret = dnodeReadMnodeEps();
if (ret == 0) {
dInfo("dnode mInfos is initialized");
}
return ret;
}
void dnodeCleanupMnodeEps() {
pthread_mutex_destroy(&tsDmeps.mutex);
}
void dnodeUpdateMnodeFromStatus(SMInfos *mInfos) {
if (mInfos->mnodeNum <= 0 || mInfos->mnodeNum > TSDB_MAX_REPLICA) {
dError("invalid mInfos since num:%d invalid", mInfos->mnodeNum);
return;
}
for (int32_t i = 0; i < mInfos->mnodeNum; ++i) {
SMInfo *minfo = &mInfos->mnodeInfos[i];
minfo->mnodeId = htonl(minfo->mnodeId);
if (minfo->mnodeId <= 0 || strlen(minfo->mnodeEp) <= 5) {
dError("invalid mInfo:%d since id:%d and ep:%s invalid", i, minfo->mnodeId, minfo->mnodeEp);
return;
}
}
pthread_mutex_lock(&tsDmeps.mutex);
if (mInfos->mnodeNum != tsDmeps.mnodeInfos.mnodeNum) {
dnodeResetMnodeEps(mInfos);
dnodeWriteMnodeEps();
} else {
int32_t size = sizeof(SMInfos);
if (memcmp(mInfos, &tsDmeps.mnodeInfos, size) != 0) {
dnodeResetMnodeEps(mInfos);
dnodeWriteMnodeEps();
}
}
pthread_mutex_unlock(&tsDmeps.mutex);
}
void dnodeUpdateMnodeFromPeer(SRpcEpSet *ep) {
if (ep->numOfEps <= 0) {
dError("mInfos is changed, but content is invalid, discard it");
return;
}
pthread_mutex_lock(&tsDmeps.mutex);
dInfo("mInfos is changed, numOfEps:%d inUse:%d", ep->numOfEps, ep->inUse);
for (int32_t i = 0; i < ep->numOfEps; ++i) {
ep->port[i] -= TSDB_PORT_DNODEDNODE;
dInfo("minfo:%d %s:%u", i, ep->fqdn[i], ep->port[i]);
}
tsDmeps.mnodeEpSet = *ep;
pthread_mutex_unlock(&tsDmeps.mutex);
}
void dnodeGetEpSetForPeer(SRpcEpSet *epSet) {
pthread_mutex_lock(&tsDmeps.mutex);
*epSet = tsDmeps.mnodeEpSet;
for (int32_t i = 0; i < epSet->numOfEps; ++i) {
epSet->port[i] += TSDB_PORT_DNODEDNODE;
}
pthread_mutex_unlock(&tsDmeps.mutex);
}
void dnodeGetEpSetForShell(SRpcEpSet *epSet) {
pthread_mutex_lock(&tsDmeps.mutex);
*epSet = tsDmeps.mnodeEpSet;
pthread_mutex_unlock(&tsDmeps.mutex);
}
......@@ -14,67 +14,44 @@
*/
#define _DEFAULT_SOURCE
#include "os.h"
#include "ttime.h"
#include "ttimer.h"
#include "tglobal.h"
#include "dnodeCfg.h"
#include "dnodeMsg.h"
#include "dnodeEps.h"
#include "dnodeMnodeEps.h"
#include "dnodeStatus.h"
#include "dnodeMain.h"
#include "mnode.h"
#include "tthread.h"
#include "ttime.h"
#include "vnode.h"
static struct {
void * dnodeTimer;
void * statusTimer;
uint32_t rebootTime;
} tsStatus;
static void dnodeSendStatusMsg(void *handle, void *tmrId) {
if (tsStatus.statusTimer == NULL) {
taosTmrReset(dnodeSendStatusMsg, tsStatusInterval * 1000, NULL, tsStatus.dnodeTimer, &tsStatus.statusTimer);
dError("failed to start status timer");
return;
}
pthread_t *threadId;
bool stop;
uint32_t rebootTime;
} tsMsg;
static void dnodeSendStatusMsg() {
int32_t contLen = sizeof(SStatusMsg) + TSDB_MAX_VNODES * sizeof(SVnodeLoad);
SStatusMsg *pStatus = rpcMallocCont(contLen);
if (pStatus == NULL) {
taosTmrReset(dnodeSendStatusMsg, tsStatusInterval * 1000, NULL, tsStatus.dnodeTimer, &tsStatus.statusTimer);
dError("failed to malloc status message");
return;
}
dnodeGetCfg(&pStatus->dnodeId, pStatus->clusterId);
pStatus->dnodeId = htonl(dnodeGetDnodeId());
pStatus->version = htonl(tsVersion);
pStatus->lastReboot = htonl(tsStatus.rebootTime);
pStatus->numOfCores = htons((uint16_t)tsNumOfCores);
pStatus->diskAvailable = tsAvailDataDirGB;
pStatus->alternativeRole = tsAlternativeRole;
pStatus->dnodeId = htonl(dnodeGetDnodeId());
tstrncpy(pStatus->dnodeEp, tsLocalEp, TSDB_EP_LEN);
pStatus->clusterId = htobe64(dnodeGetClusterId());
pStatus->lastReboot = htonl(tsMsg.rebootTime);
pStatus->numOfCores = htonl(tsNumOfCores);
pStatus->diskAvailable = tsAvailDataDirGB;
// fill cluster cfg parameters
pStatus->clusterCfg.numOfMnodes = htonl(tsNumOfMnodes);
pStatus->clusterCfg.mnodeEqualVnodeNum = htonl(tsMnodeEqualVnodeNum);
pStatus->clusterCfg.offlineThreshold = htonl(tsOfflineThreshold);
pStatus->clusterCfg.statusInterval = htonl(tsStatusInterval);
pStatus->clusterCfg.maxtablesPerVnode = htonl(tsMaxTablePerVnode);
pStatus->clusterCfg.maxVgroupsPerDb = htonl(tsMaxVgroupsPerDb);
tstrncpy(pStatus->clusterCfg.arbitrator, tsArbitrator, TSDB_EP_LEN);
tstrncpy(pStatus->clusterCfg.timezone, tsTimezone, 64);
pStatus->clusterCfg.checkTime = 0;
tstrncpy(pStatus->clusterCfg.timezone, tsTimezone, 64);
char timestr[32] = "1970-01-01 00:00:00.00";
(void)taosParseTime(timestr, &pStatus->clusterCfg.checkTime, (int32_t)strlen(timestr), TSDB_TIME_PRECISION_MILLI, 0);
tstrncpy(pStatus->clusterCfg.locale, tsLocale, TSDB_LOCALE_LEN);
tstrncpy(pStatus->clusterCfg.charset, tsCharset, TSDB_LOCALE_LEN);
pStatus->clusterCfg.enableBalance = tsEnableBalance;
pStatus->clusterCfg.flowCtrl = tsEnableFlowCtrl;
pStatus->clusterCfg.slaveQuery = tsEnableSlaveQuery;
pStatus->clusterCfg.adjustMaster = tsEnableAdjustMaster;
vnodeGetStatus(pStatus);
contLen = sizeof(SStatusMsg) + pStatus->openVnodes * sizeof(SVnodeLoad);
pStatus->openVnodes = htons(pStatus->openVnodes);
......@@ -85,52 +62,113 @@ static void dnodeSendStatusMsg(void *handle, void *tmrId) {
}
void dnodeProcessStatusRsp(SRpcMsg *pMsg) {
if (pMsg->code != TSDB_CODE_SUCCESS) {
dError("status rsp is received, error:%s", tstrerror(pMsg->code));
if (pMsg->code == TSDB_CODE_MND_DNODE_NOT_EXIST) {
char clusterId[TSDB_CLUSTER_ID_LEN];
dnodeGetClusterId(clusterId);
if (clusterId[0] != '\0') {
dnodeSetDropped();
dError("exit zombie dropped dnode");
exit(EXIT_FAILURE);
}
}
taosTmrReset(dnodeSendStatusMsg, tsStatusInterval * 1000, NULL, tsStatus.dnodeTimer, &tsStatus.statusTimer);
return;
}
dTrace("status rsp is received, code:%s", tstrerror(pMsg->code));
if (pMsg->code != TSDB_CODE_SUCCESS) return;
SStatusRsp *pStatusRsp = pMsg->pCont;
SMInfos * minfos = &pStatusRsp->mnodes;
dnodeUpdateMnodeFromStatus(minfos);
SDnodeCfg *pCfg = &pStatusRsp->dnodeCfg;
pCfg->numOfVnodes = htonl(pCfg->numOfVnodes);
pCfg->moduleStatus = htonl(pCfg->moduleStatus);
pCfg->dnodeId = htonl(pCfg->dnodeId);
pCfg->clusterId = htobe64(pCfg->clusterId);
pCfg->numOfVnodes = htonl(pCfg->numOfVnodes);
pCfg->numOfDnodes = htonl(pCfg->numOfDnodes);
dnodeUpdateCfg(pCfg);
if (pCfg->dropped) {
dError("status rsp is received, and set dnode to drop status");
return;
}
vnodeSetAccess(pStatusRsp->vgAccess, pCfg->numOfVnodes);
SDnodeEps *pEps = (SDnodeEps *)((char *)pStatusRsp->vgAccess + pCfg->numOfVnodes * sizeof(SVgroupAccess));
dnodeUpdateEps(pEps);
SDnodeEps *eps = (SDnodeEps *)((char *)pStatusRsp->vgAccess + pCfg->numOfVnodes * sizeof(SVgroupAccess));
eps->dnodeNum = htonl(eps->dnodeNum);
for (int32_t i = 0; i < eps->dnodeNum; ++i) {
eps->dnodeEps[i].dnodeId = htonl(eps->dnodeEps[i].dnodeId);
eps->dnodeEps[i].dnodePort = htons(eps->dnodeEps[i].dnodePort);
}
dnodeUpdateDnodeEps(eps);
}
taosTmrReset(dnodeSendStatusMsg, tsStatusInterval * 1000, NULL, tsStatus.dnodeTimer, &tsStatus.statusTimer);
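/* Sketch, inferred from the pointer arithmetic in dnodeProcessStatusRsp() above
   (not an authoritative wire-format description): the status response payload is
   parsed as an SStatusRsp header (mnodes + dnodeCfg), followed by numOfVnodes
   SVgroupAccess entries, and then an SDnodeEps block; the integer fields of the
   SDnodeEps block arrive in network byte order and are converted with
   htonl()/htons() before dnodeUpdateDnodeEps() is called. */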
static void *dnodeThreadRoutine(void *param) {
int32_t ms = tsStatusInterval * 1000;
while (!tsMsg.stop) {
taosMsleep(ms);
dnodeSendStatusMsg();
}
return NULL;
}
int32_t dnodeInitStatus() {
tsStatus.statusTimer = NULL;
tsStatus.dnodeTimer = dnodeGetTimer();
tsStatus.rebootTime = taosGetTimestampSec();
taosTmrReset(dnodeSendStatusMsg, 500, NULL, tsStatus.dnodeTimer, &tsStatus.statusTimer);
dInfo("dnode status timer is initialized");
return TSDB_CODE_SUCCESS;
int32_t dnodeInitMsg() {
tsMsg.stop = false;
tsMsg.rebootTime = taosGetTimestampSec();
tsMsg.threadId = taosCreateThread(dnodeThreadRoutine, NULL);
if (tsMsg.threadId == NULL) {
return -1;
}
dInfo("dnode msg is initialized");
return 0;
}
void dnodeCleanupStatus() {
if (tsStatus.statusTimer != NULL) {
taosTmrStopA(&tsStatus.statusTimer);
tsStatus.statusTimer = NULL;
void dnodeCleanupMsg() {
if (tsMsg.threadId != NULL) {
tsMsg.stop = true;
taosDestoryThread(tsMsg.threadId);
tsMsg.threadId = NULL;
}
dInfo("dnode msg is cleanuped");
}
static int32_t dnodeStartMnode(SRpcMsg *pMsg) {
SCreateMnodeMsg *pCfg = pMsg->pCont;
pCfg->dnodeId = htonl(pCfg->dnodeId);
pCfg->mnodeNum = htonl(pCfg->mnodeNum);
for (int32_t i = 0; i < pCfg->mnodeNum; ++i) {
pCfg->mnodeEps[i].dnodeId = htonl(pCfg->mnodeEps[i].dnodeId);
pCfg->mnodeEps[i].dnodePort = htons(pCfg->mnodeEps[i].dnodePort);
}
if (pCfg->dnodeId != dnodeGetDnodeId()) {
dDebug("dnode:%d, in create meps msg is not equal with saved dnodeId:%d", pCfg->dnodeId, dnodeGetDnodeId());
return TSDB_CODE_MND_DNODE_ID_NOT_CONFIGURED;
}
if (mnodeGetStatus() == MN_STATUS_READY) return 0;
return mnodeDeploy();
}
void dnodeProcessCreateMnodeReq(SRpcMsg *pMsg) {
int32_t code = dnodeStartMnode(pMsg);
SRpcMsg rspMsg = {.handle = pMsg->handle, .pCont = NULL, .contLen = 0, .code = code};
rpcSendResponse(&rspMsg);
rpcFreeCont(pMsg->pCont);
}
void dnodeProcessConfigDnodeReq(SRpcMsg *pMsg) {
SCfgDnodeMsg *pCfg = pMsg->pCont;
int32_t code = taosCfgDynamicOptions(pCfg->config);
SRpcMsg rspMsg = {.handle = pMsg->handle, .pCont = NULL, .contLen = 0, .code = code};
rpcSendResponse(&rspMsg);
rpcFreeCont(pMsg->pCont);
}
void dnodeProcessStartupReq(SRpcMsg *pMsg) {
dInfo("startup msg is received, cont:%s", (char *)pMsg->pCont);
SStartupStep *pStep = rpcMallocCont(sizeof(SStartupStep));
dnodeGetStartup(pStep);
dDebug("startup msg is sent, step:%s desc:%s finished:%d", pStep->name, pStep->desc, pStep->finished);
SRpcMsg rpcRsp = {.handle = pMsg->handle, .pCont = pStep, .contLen = sizeof(SStartupStep)};
rpcSendResponse(&rpcRsp);
rpcFreeCont(pMsg->pCont);
}
......@@ -8,4 +8,5 @@ target_include_directories(
target_link_libraries(
mnode
PUBLIC transport
PUBLIC cjson
)
\ No newline at end of file
......@@ -13,19 +13,20 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_MNODE_READ_H
#define TDENGINE_MNODE_READ_H
#ifndef _TD_MNODE_ACCT_H_
#define _TD_MNODE_ACCT_H_
#include "mnodeInt.h"
#ifdef __cplusplus
extern "C" {
#endif
#include "mnodeDef.h"
void mnodeAddReadMsgHandle(uint8_t msgType, int32_t (*fp)(SMnodeMsg *mnodeMsg));
int32_t mnodeProcessRead(SMnodeMsg *pMsg);
int32_t mnodeInitAcct();
void mnodeCleanupAcct();
#ifdef __cplusplus
}
#endif
#endif
#endif /*_TD_MNODE_ACCT_H_*/
......@@ -13,19 +13,20 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_MNODE_WRITE_H
#define TDENGINE_MNODE_WRITE_H
#ifndef _TD_MNODE_AUTH_H_
#define _TD_MNODE_AUTH_H_
#include "mnodeInt.h"
#ifdef __cplusplus
extern "C" {
#endif
#include "mnodeDef.h"
void mnodeAddWriteMsgHandle(uint8_t msgType, int32_t (*fp)(SMnodeMsg *mnodeMsg));
int32_t mnodeProcessRead(SMnodeMsg *pMsg);
int32_t mnodeInitAuth();
void mnodeCleanupAuth();
#ifdef __cplusplus
}
#endif
#endif
#endif /*_TD_MNODE_AUTH_H_*/
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_MNODE_BALANCE_H_
#define _TD_MNODE_BALANCE_H_
#include "mnodeInt.h"
#ifdef __cplusplus
extern "C" {
#endif
int32_t mnodeInitBalance();
void mnodeCleanupBalance();
#ifdef __cplusplus
}
#endif
#endif /*_TD_MNODE_BALANCE_H_*/
......@@ -13,8 +13,10 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_MNODE_CLUSTER_H
#define TDENGINE_MNODE_CLUSTER_H
#ifndef _TD_MNODE_CLUSTER_H_
#define _TD_MNODE_CLUSTER_H_
#include "mnodeInt.h"
#ifdef __cplusplus
extern "C" {
......@@ -22,13 +24,9 @@ extern "C" {
int32_t mnodeInitCluster();
void mnodeCleanupCluster();
void mnodeUpdateClusterId();
const char* mnodeGetClusterId();
int32_t mnodeCompactCluster();
#ifdef __cplusplus
}
#endif
#endif
#endif /*_TD_MNODE_CLUSTER_H_*/
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_MNODE_DATABASE_H_
#define _TD_MNODE_DATABASE_H_
#include "mnodeInt.h"
#ifdef __cplusplus
extern "C" {
#endif
int32_t mnodeInitDb();
void mnodeCleanupDb();
#ifdef __cplusplus
}
#endif
#endif /*_TD_MNODE_DATABASE_H_*/
......@@ -13,149 +13,175 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_MNODE_DEF_H
#define TDENGINE_MNODE_DEF_H
#ifndef _TD_MNODE_DEF_H_
#define _TD_MNODE_DEF_H_
#include "os.h"
#include "taosmsg.h"
#include "tlog.h"
#include "trpc.h"
#include "ttimer.h"
#include "thash.h"
#include "cJSON.h"
#include "mnode.h"
#ifdef __cplusplus
extern "C" {
#endif
#include "taosdef.h"
#include "taosmsg.h"
extern int32_t mDebugFlag;
struct SVgObj;
struct SDbObj;
struct SAcctObj;
struct SUserObj;
struct SMnodeObj;
// mnode log function
#define mFatal(...) { if (mDebugFlag & DEBUG_FATAL) { taosPrintLog("MND FATAL ", 255, __VA_ARGS__); }}
#define mError(...) { if (mDebugFlag & DEBUG_ERROR) { taosPrintLog("MND ERROR ", 255, __VA_ARGS__); }}
#define mWarn(...) { if (mDebugFlag & DEBUG_WARN) { taosPrintLog("MND WARN ", 255, __VA_ARGS__); }}
#define mInfo(...) { if (mDebugFlag & DEBUG_INFO) { taosPrintLog("MND ", 255, __VA_ARGS__); }}
#define mDebug(...) { if (mDebugFlag & DEBUG_DEBUG) { taosPrintLog("MND ", mDebugFlag, __VA_ARGS__); }}
#define mTrace(...) { if (mDebugFlag & DEBUG_TRACE) { taosPrintLog("MND ", mDebugFlag, __VA_ARGS__); }}
/*
struct define notes:
1. The first field must be the xxxxId field or the name field, e.g. 'int32_t dnodeId', 'int32_t mnodeId', 'char name[]', 'char user[]', ...
2. The fields from the dnodeId field up to the updateEnd field are persisted to disk;
3. The fields after the updateEnd field may change at runtime and are not persisted;
*/
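/* Illustration only (not from the original source): a minimal, hypothetical object
   definition that follows the three rules above -- id field first, everything up to
   updateEnd persisted to disk, runtime-only fields afterwards.
typedef struct SDemoObj {
  int32_t demoId;       // rule 1: the id field comes first
  int64_t createdTime;  // rule 2: persisted, together with every field up to updateEnd
  int8_t  updateEnd[4];
  int32_t refCount;     // rule 3: runtime-only, may change and is not persisted
} SDemoObj;
*/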
// #define mLError(...) { monSaveLog(2, __VA_ARGS__); mError(__VA_ARGS__) }
// #define mLWarn(...) { monSaveLog(1, __VA_ARGS__); mWarn(__VA_ARGS__) }
// #define mLInfo(...) { monSaveLog(0, __VA_ARGS__); mInfo(__VA_ARGS__) }
#define mLError(...) {mError(__VA_ARGS__) }
#define mLWarn(...) {mWarn(__VA_ARGS__) }
#define mLInfo(...) {mInfo(__VA_ARGS__) }
typedef struct SClusterObj SClusterObj;
typedef struct SDnodeObj SDnodeObj;
typedef struct SMnodeObj SMnodeObj;
typedef struct SAcctObj SAcctObj;
typedef struct SUserObj SUserObj;
typedef struct SDbObj SDbObj;
typedef struct SVgObj SVgObj;
typedef struct SSTableObj SSTableObj;
typedef struct SFuncObj SFuncObj;
typedef struct SOperObj SOperObj;
typedef struct SMnMsg SMnMsg;
typedef enum {
MN_SDB_START = 0,
MN_SDB_CLUSTER = 1,
MN_SDB_DNODE = 2,
MN_SDB_MNODE = 3,
MN_SDB_ACCT = 4,
MN_SDB_AUTH = 5,
MN_SDB_USER = 6,
MN_SDB_DB = 7,
MN_SDB_VGROUP = 8,
MN_SDB_STABLE = 9,
MN_SDB_FUNC = 10,
MN_SDB_OPER = 11,
MN_SDB_MAX = 12
} EMnSdb;
typedef enum { MN_OP_START = 0, MN_OP_INSERT = 1, MN_OP_UPDATE = 2, MN_OP_DELETE = 3, MN_OP_MAX = 4 } EMnOp;
typedef enum { MN_KEY_START = 0, MN_KEY_BINARY = 1, MN_KEY_INT32 = 2, MN_KEY_INT64 = 3, MN_KEY_MAX } EMnKey;
typedef enum {
MN_AUTH_ACCT_START = 0,
MN_AUTH_ACCT_USER,
MN_AUTH_ACCT_DNODE,
MN_AUTH_ACCT_MNODE,
MN_AUTH_ACCT_DB,
MN_AUTH_ACCT_TABLE,
MN_AUTH_ACCT_MAX
} EMnAuthAcct;
typedef enum {
MN_AUTH_OP_START = 0,
MN_AUTH_OP_CREATE_USER,
MN_AUTH_OP_ALTER_USER,
MN_AUTH_OP_DROP_USER,
MN_AUTH_MAX
} EMnAuthOp;
typedef enum { MN_SDB_STAT_AVAIL = 0, MN_SDB_STAT_DROPPED = 1 } EMnSdbStat;
typedef struct {
int8_t type;
int8_t status;
int8_t align[6];
} SdbHead;
typedef struct SClusterObj {
SdbHead head;
int64_t id;
char uid[TSDB_CLUSTER_ID_LEN];
int64_t createdTime;
int8_t reserved[12];
int8_t updateEnd[4];
int32_t refCount;
int64_t updateTime;
} SClusterObj;
typedef struct SDnodeObj {
int32_t dnodeId;
int32_t openVnodes;
int64_t createdTime;
int64_t lastAccess;
int32_t customScore; // config by user
uint16_t numOfCores; // from dnode status msg
uint16_t dnodePort;
char dnodeFqdn[TSDB_FQDN_LEN];
char dnodeEp[TSDB_EP_LEN];
int8_t alternativeRole; // from dnode status msg, 0-any, 1-mgmt, 2-dnode
int8_t status; // set in balance function
int8_t isMgmt;
int8_t reserve1[11];
int8_t updateEnd[4];
int32_t refCount;
uint32_t moduleStatus;
uint32_t lastReboot; // time stamp for last reboot
float score; // calc in balance function
float diskAvailable; // from dnode status msg
int16_t diskAvgUsage; // calc from sys.disk
int16_t cpuAvgUsage; // calc from sys.cpu
int16_t memoryAvgUsage; // calc from sys.mem
int16_t bandwidthUsage; // calc from sys.band
int8_t offlineReason;
int8_t reserved2[1];
SdbHead head;
int32_t id;
int32_t vnodes;
int64_t createdTime;
int64_t updateTime;
int64_t lastAccess;
int64_t lastReboot; // time stamp for last reboot
char fqdn[TSDB_FQDN_LEN];
char ep[TSDB_EP_LEN];
uint16_t port;
int16_t numOfCores; // from dnode status msg
int8_t alternativeRole; // from dnode status msg, 0-any, 1-mgmt, 2-dnode
int8_t status; // set in balance function
int8_t offlineReason;
} SDnodeObj;
typedef struct SMnodeObj {
int32_t mnodeId;
int8_t reserved0[4];
int64_t createdTime;
int8_t reserved1[4];
int8_t updateEnd[4];
int32_t refCount;
SdbHead head;
int32_t id;
int8_t status;
int8_t role;
int32_t roleTerm;
int64_t roleTime;
int8_t reserved2[3];
} SMnodeObj;
typedef struct STableObj {
char *tableId;
int8_t type;
} STableObj;
typedef struct SSTableObj {
STableObj info;
int8_t reserved0[9]; // padding to keep struct STableObj 4-byte aligned
int16_t nextColId;
int32_t sversion;
uint64_t uid;
int64_t createdTime;
int32_t tversion;
int32_t numOfColumns;
int32_t numOfTags;
int8_t updateEnd[4];
int32_t refCount;
int32_t numOfTables;
SSchema * schema;
void * vgHash;
} SSTableObj;
int64_t updateTime;
SDnodeObj *pDnode;
} SMnodeObj;
typedef struct {
STableObj info;
int8_t reserved0[9]; // padding to keep struct STableObj 4-byte aligned
int16_t nextColId; //used by normal table
int32_t sversion; //used by normal table
uint64_t uid;
uint64_t suid;
int64_t createdTime;
int32_t numOfColumns; //used by normal table
int32_t tid;
int32_t vgId;
int32_t sqlLen;
int8_t updateEnd[4];
int32_t refCount;
char* sql; //used by normal table
SSchema* schema; //used by normal table
SSTableObj*superTable;
} SCTableObj;
int32_t maxUsers;
int32_t maxDbs;
int32_t maxTimeSeries;
int32_t maxStreams;
int64_t maxStorage; // In unit of GB
int8_t accessState; // Configured only by command
} SAcctCfg;
typedef struct {
int32_t dnodeId;
int8_t role;
int8_t vver[3]; // To ensure compatibility, 3 bits are used to represent the remainder of 64 bit version
SDnodeObj *pDnode;
} SVnodeGid;
int32_t numOfUsers;
int32_t numOfDbs;
int32_t numOfTimeSeries;
int32_t numOfStreams;
int64_t totalStorage; // Total storage written from this account
int64_t compStorage; // Compressed storage on disk
} SAcctInfo;
typedef struct SVgObj {
uint32_t vgId;
int32_t numOfVnodes;
int64_t createdTime;
int32_t lbDnodeId;
int32_t lbTime;
char dbName[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN];
int8_t inUse;
int8_t accessState;
int8_t status;
int8_t reserved0[4];
SVnodeGid vnodeGid[TSDB_MAX_REPLICA];
int32_t vgCfgVersion;
int8_t compact;
int8_t reserved1[8];
int8_t updateEnd[4];
int32_t refCount;
int32_t numOfTables;
int64_t totalStorage;
int64_t compStorage;
int64_t pointsWritten;
struct SDbObj *pDb;
void * idPool;
} SVgObj;
typedef struct SAcctObj {
SdbHead head;
char acct[TSDB_USER_LEN];
int64_t createdTime;
int64_t updateTime;
int32_t acctId;
int8_t status;
SAcctCfg cfg;
SAcctInfo info;
} SAcctObj;
typedef struct SUserObj {
SdbHead head;
char user[TSDB_USER_LEN];
char pass[TSDB_KEY_LEN];
char acct[TSDB_USER_LEN];
int64_t createdTime;
int64_t updateTime;
int8_t rootAuth;
SHashObj *prohibitDbHash;
SAcctObj *pAcct;
} SUserObj;
typedef struct {
int32_t cacheBlockSize;
......@@ -178,115 +204,117 @@ typedef struct {
int8_t cacheLastRow;
int8_t dbType;
int16_t partitions;
int8_t reserved[7];
} SDbCfg;
typedef struct SDbObj {
char name[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN];
int8_t reserved0[4];
char acct[TSDB_USER_LEN];
int64_t createdTime;
int32_t dbCfgVersion;
SDbCfg cfg;
int8_t status;
int8_t reserved1[11];
int8_t updateEnd[4];
int32_t refCount;
int32_t numOfVgroups;
int32_t numOfTables;
int32_t numOfSuperTables;
int32_t vgListSize;
int32_t vgListIndex;
SVgObj **vgList;
struct SAcctObj *pAcct;
pthread_mutex_t mutex;
SdbHead head;
char name[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN];
char acct[TSDB_USER_LEN];
int64_t createdTime;
int64_t updateTime;
SDbCfg cfg;
int8_t status;
int32_t numOfVgroups;
int32_t numOfTables;
int32_t numOfSuperTables;
int32_t vgListSize;
int32_t vgListIndex;
SVgObj **vgList;
SAcctObj *pAcct;
} SDbObj;
typedef struct SUserObj {
char user[TSDB_USER_LEN];
char pass[TSDB_KEY_LEN];
char acct[TSDB_USER_LEN];
int64_t createdTime;
int8_t superAuth;
int8_t writeAuth;
int8_t reserved[10];
int8_t updateEnd[4];
int32_t refCount;
struct SAcctObj * pAcct;
} SUserObj;
typedef struct SFuncObj {
char name[TSDB_FUNC_NAME_LEN];
char path[128];
int32_t contLen;
char cont[TSDB_FUNC_CODE_LEN];
int32_t funcType;
int32_t bufSize;
int64_t createdTime;
uint8_t resType;
int16_t resBytes;
int64_t sig; // partial md5 sign
int16_t type; // [lua script|so|js]
int8_t reserved[64];
int8_t updateEnd[4];
int32_t refCount;
} SFuncObj;
typedef struct {
int64_t totalStorage; // Total storage written from this account
int64_t compStorage; // Compressed storage on disk
int64_t queryTime;
int64_t totalPoints;
int64_t inblound;
int64_t outbound;
int64_t sKey;
int32_t numOfUsers;
int32_t numOfDbs;
int32_t numOfTimeSeries;
int32_t numOfPointsPerSecond;
int32_t numOfConns;
int32_t numOfQueries;
int32_t numOfStreams;
int8_t accessState; // Checked by mgmt heartbeat message
int8_t reserved[3];
} SAcctInfo;
int32_t dnodeId;
int8_t role;
SDnodeObj *pDnode;
} SVnodeGid;
typedef struct SAcctObj {
char user[TSDB_USER_LEN];
char pass[TSDB_KEY_LEN];
SAcctCfg cfg;
typedef struct SVgObj {
uint32_t vgId;
int32_t numOfVnodes;
int64_t createdTime;
int32_t acctId;
int64_t updateTime;
int32_t lbDnodeId;
int32_t lbTime;
char dbName[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN];
int8_t inUse;
int8_t accessState;
int8_t status;
int8_t reserved0[7];
int8_t updateEnd[4];
int32_t refCount;
int8_t reserved1[4];
SAcctInfo acctInfo;
pthread_mutex_t mutex;
} SAcctObj;
SVnodeGid vnodeGid[TSDB_MAX_REPLICA];
int32_t vgCfgVersion;
int8_t compact;
int32_t numOfTables;
int64_t totalStorage;
int64_t compStorage;
int64_t pointsWritten;
SDbObj *pDb;
} SVgObj;
typedef struct SSTableObj {
SdbHead head;
char tableId[TSDB_TABLE_NAME_LEN];
uint64_t uid;
int64_t createdTime;
int64_t updateTime;
int32_t numOfColumns; // used by normal table
int32_t numOfTags;
SSchema * schema;
} SSTableObj;
typedef struct SFuncObj {
SdbHead head;
char name[TSDB_FUNC_NAME_LEN];
char path[128];
int32_t contLen;
char cont[TSDB_FUNC_CODE_LEN];
int32_t funcType;
int32_t bufSize;
int64_t createdTime;
uint8_t resType;
int16_t resBytes;
int64_t sig;
int16_t type;
} SFuncObj;
typedef struct {
char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN];
int8_t type;
int8_t maxReplica;
int16_t numOfColumns;
int32_t index;
int32_t rowSize;
int32_t numOfRows;
void * pIter;
void * pVgIter;
void ** ppShow;
int16_t offset[TSDB_MAX_COLUMNS];
int32_t bytes[TSDB_MAX_COLUMNS];
int32_t numOfReads;
int8_t maxReplica;
int8_t reserved0[1];
uint16_t payloadLen;
void *pIter;
void *pVgIter;
void **ppShow;
char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN];
int16_t offset[TSDB_MAX_COLUMNS];
int32_t bytes[TSDB_MAX_COLUMNS];
char payload[];
} SShowObj;
typedef struct {
int32_t len;
void *rsp;
} SMnRsp;
typedef struct SMnMsg {
void (*fp)(SMnMsg *pMsg, int32_t code);
SUserObj *pUser;
int16_t received;
int16_t successed;
int16_t expected;
int16_t retry;
int32_t code;
int64_t createdTime;
SMnRsp rpcRsp;
SRpcMsg rpcMsg;
char pCont[];
} SMnReq;
#ifdef __cplusplus
}
#endif
#endif
#endif /*_TD_MNODE_DEF_H_*/
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_MNODE_DNODE_H_
#define _TD_MNODE_DNODE_H_
#include "mnodeInt.h"
#ifdef __cplusplus
extern "C" {
#endif
int32_t mnodeInitDnode();
void mnodeCleanupDnode();
#ifdef __cplusplus
}
#endif
#endif /*_TD_MNODE_DNODE_H_*/
......@@ -16,6 +16,7 @@ target_link_libraries(
PUBLIC tq
PUBLIC tsdb
PUBLIC wal
PUBLIC sync
PUBLIC cjson
)
......