Commit 4a59e130 authored by: H Haojun Liao

other: merge 3.0.

......@@ -4,11 +4,6 @@ import jenkins.model.CauseOfInterruption
node {
}
win_test_stage = 0
linux_ready = 0
linux_node_ip = ""
linux_node_pass = ""
def abortPreviousBuilds() {
def currentJobName = env.JOB_NAME
def currentBuildNumber = env.BUILD_NUMBER.toInteger()
......@@ -43,10 +38,8 @@ def pre_test(){
sh '''
cd ${WK}
git reset --hard
git fetch || git fetch
cd ${WKC}
git reset --hard
git fetch || git fetch
'''
script {
if (env.CHANGE_TARGET == 'master') {
......@@ -82,9 +75,11 @@ def pre_test(){
if (env.CHANGE_URL =~ /\/TDengine\//) {
sh '''
cd ${WKC}
git remote prune origin
git pull >/dev/null
git log -5
echo "`date "+%Y%m%d-%H%M%S"` ${JOB_NAME}:${BRANCH_NAME}:${BUILD_ID}:${CHANGE_TARGET}" >>${WKDIR}/jenkins.log
echo "CHANGE_BRANCH:${CHANGE_BRANCH}" >>${WKDIR}/jenkins.log
echo "community log: `git log -5`" >>${WKDIR}/jenkins.log
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
......@@ -101,12 +96,14 @@ def pre_test(){
git pull >/dev/null
git log -5
echo "`date "+%Y%m%d-%H%M%S"` ${JOB_NAME}:${BRANCH_NAME}:${BUILD_ID}:${CHANGE_TARGET}" >>${WKDIR}/jenkins.log
echo "CHANGE_BRANCH:${CHANGE_BRANCH}" >>${WKDIR}/jenkins.log
echo "tdinternal log: `git log -5`" >>${WKDIR}/jenkins.log
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
git log -5
echo "tdinternal log merged: `git log -5`" >>${WKDIR}/jenkins.log
cd ${WKC}
git remote prune origin
git pull >/dev/null
git log -5
echo "community log: `git log -5`" >>${WKDIR}/jenkins.log
......@@ -137,53 +134,51 @@ def pre_test_win(){
set
date /t
time /t
rd /s /Q C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\debug || exit 0
rd /s /Q %WIN_INTERNAL_ROOT%\\debug || exit 0
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
git reset --hard
git fetch || git fetch
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git reset --hard
git fetch || git fetch
'''
script {
if (env.CHANGE_TARGET == 'master') {
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
git checkout master
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git checkout master
'''
} else if(env.CHANGE_TARGET == '2.0') {
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
git checkout 2.0
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git checkout 2.0
'''
} else if(env.CHANGE_TARGET == '3.0') {
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
git checkout 3.0
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git checkout 3.0
'''
} else {
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
git checkout develop
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git checkout develop
'''
}
......@@ -191,36 +186,38 @@ def pre_test_win(){
script {
if (env.CHANGE_URL =~ /\/TDengine\//) {
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
git pull
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git remote prune origin
git pull
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git fetch origin +refs/pull/%CHANGE_ID%/merge
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git checkout -qf FETCH_HEAD
'''
} else if (env.CHANGE_URL =~ /\/TDinternal\//) {
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
git pull
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
git fetch origin +refs/pull/%CHANGE_ID%/merge
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
git checkout -qf FETCH_HEAD
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git remote prune origin
git pull
'''
} else {
......@@ -230,27 +227,27 @@ def pre_test_win(){
}
}
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
git branch
git log -5
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git branch
git log -5
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git submodule update --init --recursive
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\taos-connector-python
cd %WIN_CONNECTOR_ROOT%
git branch
git reset --hard
git pull
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\taos-connector-python
cd %WIN_CONNECTOR_ROOT%
git log -5
'''
}
......@@ -258,7 +255,7 @@ def pre_test_build_win() {
bat '''
echo "building ..."
time /t
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
mkdir debug
cd debug
time /t
......@@ -273,9 +270,9 @@ def pre_test_build_win() {
time /t
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\taos-connector-python
cd %WIN_CONNECTOR_ROOT%
python -m pip install .
xcopy /e/y/i/f C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
'''
return 1
}
......@@ -283,24 +280,22 @@ def run_win_ctest() {
bat '''
echo "windows ctest ..."
time /t
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\debug
cd %WIN_INTERNAL_ROOT%\\debug
ctest -j 1 || exit 7
time /t
'''
}
def run_win_test() {
echo "LINUX NODE: ${linux_node_ip} - ${linux_node_pass}"
bat '''
echo "windows test ..."
cd C:\\workspace\\%EXECUTOR_NUMBER%\\taos-connector-python
cd %WIN_CONNECTOR_ROOT%
python -m pip install .
xcopy /e/y/i/f C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
ls -l C:\\Windows\\System32\\taos.dll
time /t
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community\\tests\\system-test
echo "node: ''' + linux_node_ip + ''':''' + linux_node_pass + '''"
cd %WIN_SYSTEM_TEST_ROOT%
echo "testing ..."
test-all.bat "{\\\"host\\\":\\\"''' + linux_node_ip + '''\\\",\\\"port\\\":22,\\\"user\\\":\\\"root\\\",\\\"password\\\":\\\"''' + linux_node_pass + '''\\\",\\\"path\\\":\\\"/var/lib/jenkins/workspace/TDinternal\\\"}"
test-all.bat ci
time /t
'''
}
......@@ -319,23 +314,21 @@ pipeline {
parallel {
stage('windows test') {
agent{label " windows10_01 || windows10_02 || windows10_03 || windows10_04 "}
environment{
WIN_INTERNAL_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\TDinternal"
WIN_COMMUNITY_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\TDinternal\\community"
WIN_SYSTEM_TEST_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\TDinternal\\community\\tests\\system-test"
WIN_CONNECTOR_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\taos-connector-python"
}
steps {
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
timeout(time: 55, unit: 'MINUTES'){
pre_test_win()
pre_test_build_win()
run_win_ctest()
script {
while(linux_ready == 0) {
sleep(8)
}
}
run_win_test()
}
}
script {
win_test_stage = 1
}
}
}
stage('linux test') {
......@@ -346,18 +339,15 @@ pipeline {
}
steps {
script {
linux_node_ip = sh (
script: 'jq .ip /home/node_info.json | sed "s/\\\"//g"',
returnStdout: true
).trim()
linux_node_pass = sh (
script: 'jq .password /home/node_info.json | sed "s/\\\"//g" |sed "s/\\!/^^^^^^^^\\!/g"',
def linux_node_ip = sh (
script: 'ip addr|grep 192|grep -v virbr|awk "{print \\\$2}"|sed "s/\\/.*//"',
returnStdout: true
).trim()
echo "${linux_node_ip}:${linux_node_pass}"
echo "${linux_node_ip}"
echo "${WKDIR}/restore.sh -p ${BRANCH_NAME} -n ${BUILD_ID} -c {container name}"
}
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
timeout(time: 40, unit: 'MINUTES'){
timeout(time: 120, unit: 'MINUTES'){
pre_test()
script {
sh '''
......@@ -368,11 +358,36 @@ pipeline {
rm -f /tmp/cases.task
./collect_cases.sh -e
'''
def extra_param = ""
def log_server_file = "/home/log_server.json"
def timeout_cmd = ""
if (fileExists(log_server_file)) {
def log_server_enabled = sh (
script: 'jq .enabled ' + log_server_file,
returnStdout: true
).trim()
def timeout_param = sh (
script: 'jq .timeout ' + log_server_file,
returnStdout: true
).trim()
if (timeout_param != "null" && timeout_param != "0") {
timeout_cmd = "timeout " + timeout_param
}
if (log_server_enabled == "1") {
def log_server = sh (
script: 'jq .server ' + log_server_file + ' | sed "s/\\\"//g"',
returnStdout: true
).trim()
if (log_server != "null" && log_server != "") {
extra_param = "-w " + log_server
}
}
}
sh '''
cd ${WKC}/tests/parallel_test
export DEFAULT_RETRY_TIME=2
date
timeout 2100 time ./run.sh -e -m /home/m.json -t /tmp/cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 480
''' + timeout_cmd + ''' time ./run.sh -e -m /home/m.json -t /tmp/cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 480 ''' + extra_param + '''
'''
}
}
......@@ -387,38 +402,9 @@ pipeline {
cd ${WKC}/packaging
./release.sh -v cluster -n 3.0.0.100 -s static
'''
sh '''
echo "install ..."
cd ${WKC}/release
tar xzf TDengine-enterprise-server-3.0.0.100-Linux-x64.tar.gz
cd TDengine-enterprise-server-3.0.0.100
service taosd stop || :
rm -rf /var/lib/taos
./install.sh -e no
'''
sh '''
echo "checking ..."
which taos
which taosd
rm -rf ${WK}/debug
mv ${WKC}/debug ${WK}/
'''
sh '''
echo "install taospy ..."
cd ${WKPY}
pip3 install .
'''
}
}
}
script {
linux_ready = 1
}
script {
while(win_test_stage == 0){
sleep(12)
}
}
}
}
}
......
......@@ -70,5 +70,46 @@ ELSEIF (${CMAKE_SYSTEM_NAME} MATCHES "Windows")
ENDIF()
IF ("${CPUTYPE}" STREQUAL "")
MESSAGE(STATUS "The current platform " ${CMAKE_SYSTEM_PROCESSOR} " is detected")
IF (CMAKE_SYSTEM_PROCESSOR MATCHES "(amd64)|(AMD64)")
MESSAGE(STATUS "The current platform is amd64")
SET(PLATFORM_ARCH_STR "amd64")
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "(x86)|(X86)")
MESSAGE(STATUS "The current platform is x86")
SET(PLATFORM_ARCH_STR "i386")
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "armv7l")
MESSAGE(STATUS "The current platform is aarch32")
SET(PLATFORM_ARCH_STR "arm")
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
MESSAGE(STATUS "The current platform is aarch64")
SET(PLATFORM_ARCH_STR "arm64")
ENDIF ()
ELSE ()
# if generate ARM version:
# cmake -DCPUTYPE=aarch32 .. or cmake -DCPUTYPE=aarch64
IF (${CPUTYPE} MATCHES "aarch32")
SET(PLATFORM_ARCH_STR "arm")
MESSAGE(STATUS "input cpuType: aarch32")
ELSEIF (${CPUTYPE} MATCHES "aarch64")
SET(PLATFORM_ARCH_STR "arm64")
MESSAGE(STATUS "input cpuType: aarch64")
ELSEIF (${CPUTYPE} MATCHES "mips64")
SET(PLATFORM_ARCH_STR "mips")
MESSAGE(STATUS "input cpuType: mips64")
ELSEIF (${CPUTYPE} MATCHES "x64")
SET(PLATFORM_ARCH_STR "amd64")
MESSAGE(STATUS "input cpuType: x64")
ELSEIF (${CPUTYPE} MATCHES "x86")
SET(PLATFORM_ARCH_STR "i386")
MESSAGE(STATUS "input cpuType: x86")
ELSE ()
MESSAGE(STATUS "input cpuType unknown " ${CPUTYPE})
ENDIF ()
ENDIF ()
MESSAGE(STATUS "platform arch:" ${PLATFORM_ARCH_STR})
MESSAGE("C Compiler ID: ${CMAKE_C_COMPILER_ID}")
MESSAGE("CXX Compiler ID: ${CMAKE_CXX_COMPILER_ID}")
......@@ -54,14 +54,14 @@ Database changed.
taos> show vgroups;
vgId | tables | status | onlines | v1_dnode | v1_status | compacting |
==========================================================================================
14 | 38000 | ready | 1 | 1 | master | 0 |
15 | 38000 | ready | 1 | 1 | master | 0 |
16 | 38000 | ready | 1 | 1 | master | 0 |
17 | 38000 | ready | 1 | 1 | master | 0 |
18 | 37001 | ready | 1 | 1 | master | 0 |
19 | 37000 | ready | 1 | 1 | master | 0 |
20 | 37000 | ready | 1 | 1 | master | 0 |
21 | 37000 | ready | 1 | 1 | master | 0 |
14 | 38000 | ready | 1 | 1 | leader | 0 |
15 | 38000 | ready | 1 | 1 | leader | 0 |
16 | 38000 | ready | 1 | 1 | leader | 0 |
17 | 38000 | ready | 1 | 1 | leader | 0 |
18 | 37001 | ready | 1 | 1 | leader | 0 |
19 | 37000 | ready | 1 | 1 | leader | 0 |
20 | 37000 | ready | 1 | 1 | leader | 0 |
21 | 37000 | ready | 1 | 1 | leader | 0 |
Query OK, 8 row(s) in set (0.001154s)
```
......@@ -161,14 +161,14 @@ First `show vgroups` is executed to show the vgroup distribution.
taos> show vgroups;
vgId | tables | status | onlines | v1_dnode | v1_status | compacting |
==========================================================================================
14 | 38000 | ready | 1 | 3 | master | 0 |
15 | 38000 | ready | 1 | 3 | master | 0 |
16 | 38000 | ready | 1 | 3 | master | 0 |
17 | 38000 | ready | 1 | 3 | master | 0 |
18 | 37001 | ready | 1 | 3 | master | 0 |
19 | 37000 | ready | 1 | 1 | master | 0 |
20 | 37000 | ready | 1 | 1 | master | 0 |
21 | 37000 | ready | 1 | 1 | master | 0 |
14 | 38000 | ready | 1 | 3 | leader | 0 |
15 | 38000 | ready | 1 | 3 | leader | 0 |
16 | 38000 | ready | 1 | 3 | leader | 0 |
17 | 38000 | ready | 1 | 3 | leader | 0 |
18 | 37001 | ready | 1 | 3 | leader | 0 |
19 | 37000 | ready | 1 | 1 | leader | 0 |
20 | 37000 | ready | 1 | 1 | leader | 0 |
21 | 37000 | ready | 1 | 1 | leader | 0 |
Query OK, 8 row(s) in set (0.001314s)
```
......@@ -191,14 +191,14 @@ Query OK, 0 row(s) in set (0.000575s)
taos> show vgroups;
vgId | tables | status | onlines | v1_dnode | v1_status | v2_dnode | v2_status | compacting |
=================================================================================================================
14 | 38000 | ready | 1 | 3 | master | 0 | NULL | 0 |
15 | 38000 | ready | 1 | 3 | master | 0 | NULL | 0 |
16 | 38000 | ready | 1 | 3 | master | 0 | NULL | 0 |
17 | 38000 | ready | 1 | 3 | master | 0 | NULL | 0 |
18 | 37001 | ready | 2 | 1 | slave | 3 | master | 0 |
19 | 37000 | ready | 1 | 1 | master | 0 | NULL | 0 |
20 | 37000 | ready | 1 | 1 | master | 0 | NULL | 0 |
21 | 37000 | ready | 1 | 1 | master | 0 | NULL | 0 |
14 | 38000 | ready | 1 | 3 | leader | 0 | NULL | 0 |
15 | 38000 | ready | 1 | 3 | leader | 0 | NULL | 0 |
16 | 38000 | ready | 1 | 3 | leader | 0 | NULL | 0 |
17 | 38000 | ready | 1 | 3 | leader | 0 | NULL | 0 |
18 | 37001 | ready | 2 | 1 | follower | 3 | leader | 0 |
19 | 37000 | ready | 1 | 1 | leader | 0 | NULL | 0 |
20 | 37000 | ready | 1 | 1 | leader | 0 | NULL | 0 |
21 | 37000 | ready | 1 | 1 | leader | 0 | NULL | 0 |
Query OK, 8 row(s) in set (0.001242s)
```
......@@ -207,7 +207,7 @@ It can be seen from above output that vgId 18 has been moved from dnode 3 to dno
:::note
- Manual load balancing can only be performed when the automatic load balancing is disabled, i.e. `balance` is set to 0.
- Only a vnode in normal state, i.e. master or slave, can be moved. vnode can't be moved when its in status offline, unsynced or syncing.
- Only a vnode in normal state, i.e. leader or follower, can be moved. A vnode can't be moved when it's in offline, unsynced or syncing status.
- Before moving a vnode, it's necessary to make sure the target dnode has enough resources: CPU, memory and disk.
:::
......@@ -27,7 +27,7 @@ There may be multiple dnodes in a cluster, but only one mnode can be started in
SHOW MNODES;
```
The end point and role/status (master, slave, unsynced, or offline) of all mnodes can be shown by the above command. When the first dnode is started in a cluster, there must be one mnode in this dnode. Without at least one mnode, the cluster cannot work. If `numOfMNodes` is configured to 2, another mnode will be started when the second dnode is launched.
The end point and role/status (leader, follower, unsynced, or offline) of all mnodes can be shown by the above command. When the first dnode is started in a cluster, there must be one mnode in this dnode. Without at least one mnode, the cluster cannot work. If `numOfMNodes` is configured to 2, another mnode will be started when the second dnode is launched.
For the high availability of mnode, `numOfMnodes` needs to be configured to 2 or a higher value. Because the data consistency between mnodes must be guaranteed, the replica confirmation parameter `quorum` is set to 2 automatically if `numOfMNodes` is set to 2 or higher.
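As a concrete illustration of the paragraph above, here is a minimal sketch of enabling a second mnode, assuming the `numOfMnodes` setting is placed in `taos.cfg` on each dnode (the config path and the systemd unit name are assumptions, not part of this commit):

```bash
# Hedged sketch: request two mnodes before starting/restarting taosd on each dnode.
# The parameter name comes from the text above; adjust paths to your installation.
echo "numOfMnodes 2" >> /etc/taos/taos.cfg
systemctl restart taosd    # or however taosd is supervised on the host
```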
......@@ -58,13 +58,13 @@ When a dnode is offline, it can be detected by the TDengine cluster. There are t
- If the dnode has been offline for longer than the threshold configured by `offlineThreshold` in `taos.cfg`, the dnode will be removed from the cluster automatically. A system alert will be generated and automatic load balancing will be triggered if `balance` is set to 1. When the removed dnode is restarted and becomes online, it will not join the cluster automatically; the system administrator has to manually add it back to the cluster.
:::note
If all the vnodes in a vgroup (or mnodes in mnode group) are in offline or unsynced status, the master node can only be voted on, after all the vnodes or mnodes in the group become online and can exchange status. Following this, the vgroup (or mnode group) is able to provide service.
If all the vnodes in a vgroup (or mnodes in the mnode group) are in offline or unsynced status, the leader node can only be elected after all the vnodes or mnodes in the group become online and can exchange status. Following this, the vgroup (or mnode group) is able to provide service.
:::
## Arbitrator
The "arbitrator" component is used to address the special case when the number of replicas is set to an even number like 2,4 etc. If half of the vnodes in a vgroup don't work, it is impossible to vote and select a master node. This situation also applies to mnodes if the number of mnodes is set to an even number like 2,4 etc.
The "arbitrator" component is used to address the special case when the number of replicas is set to an even number like 2,4 etc. If half of the vnodes in a vgroup don't work, it is impossible to vote and select a leader node. This situation also applies to mnodes if the number of mnodes is set to an even number like 2,4 etc.
To resolve this problem, a new arbitrator component named `tarbitrator`, an abbreviation of TDengine Arbitrator, was introduced. The `tarbitrator` simulates a vnode or mnode but is only responsible for network communication and doesn't handle any actual data access. As long as more than half of the vnodes or mnodes, including the arbitrator, are available, the vnode group or mnode group can provide data insertion or query services normally.
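As a rough, hedged sketch of how such a deployment might look (assuming the conventional default port 6042 and an `arbitrator` setting in `taos.cfg`; verify both against the installed version, as they are not taken from this commit):

```bash
# Run the arbitrator on a spare host; by assumption it listens on port 6042.
nohup tarbitrator > /var/log/taos/tarbitrator.log 2>&1 &

# On every dnode, point the cluster at the arbitrator end point and restart taosd.
echo "arbitrator arbitrator-host:6042" >> /etc/taos/taos.cfg
systemctl restart taosd
```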
......
......@@ -21,7 +21,7 @@ The following example is in an Ubuntu environment and uses the `curl` tool to ve
The following example lists all databases on the host h1.taosdata.com. To use it in your environment, replace `h1.taosdata.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number.
```html
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' h1.taosdata.com:6041/rest/sql
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" h1.taosdata.com:6041/rest/sql
```
The following return value results indicate that the verification passed.
......@@ -106,13 +106,13 @@ The HTTP request's BODY is a complete SQL command, and the data table in the SQL
Use `curl` to initiate an HTTP request with a custom authentication method, with the following syntax.
```bash
curl -H 'Authorization: Basic <TOKEN>' -d '<SQL>' <ip>:<PORT>/rest/sql/[db_name]
curl -L -H "Authorization: Basic <TOKEN>" -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name]
```
Or
```bash
curl -u username:password -d '<SQL>' <ip>:<PORT>/rest/sql/[db_name]
curl -L -u username:password -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name]
```
where `TOKEN` is the string after Base64 encoding of `{username}:{password}`, e.g. `root:taosdata` is encoded as `cm9vdDp0YW9zZGF0YQ==`.
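For example, the token can be produced with the standard `base64` utility (a small sketch; the `-n` flag matters because a trailing newline would change the encoding):

```bash
# Encode {username}:{password} for the Authorization header.
TOKEN=$(echo -n 'root:taosdata' | base64)
echo "$TOKEN"    # prints cm9vdDp0YW9zZGF0YQ==

curl -L -H "Authorization: Basic $TOKEN" -d "show databases;" <ip>:<PORT>/rest/sql
```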
......@@ -192,7 +192,7 @@ Response body:
- query all records from table d1001 of database demo
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001' 192.168.0.1:6041/rest/sql
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sql
```
Response body:
......@@ -218,7 +218,7 @@ Response body:
- Create database demo:
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 192.168.0.1:6041/rest/sql
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "create database demo" 192.168.0.1:6041/rest/sql
```
Response body:
......@@ -240,7 +240,7 @@ Response body:
When the HTTP request URL uses `/rest/sqlt`, the returned result set's timestamp value will be in Unix timestamp format, for example:
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001' 192.168.0.1:6041/rest/sqlt
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sqlt
```
Response body:
......@@ -268,7 +268,7 @@ Response body:
When the HTTP request URL uses `/rest/sqlutc`, the timestamp of the returned result set will be expressed as a UTC format, for example:
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.t1' 192.168.0.1:6041/rest/sqlutc
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.t1" 192.168.0.1:6041/rest/sqlutc
```
Response body:
......
......@@ -211,7 +211,7 @@
],
"timeFrom": null,
"timeShift": null,
"title": "Master MNode",
"title": "Leader MNode",
"transformations": [
{
"id": "filterByValue",
......@@ -221,7 +221,7 @@
"config": {
"id": "regex",
"options": {
"value": "master"
"value": "leader"
}
},
"fieldName": "role"
......@@ -300,7 +300,7 @@
],
"timeFrom": null,
"timeShift": null,
"title": "Master MNode Create Time",
"title": "Leader MNode Create Time",
"transformations": [
{
"id": "filterByValue",
......@@ -310,7 +310,7 @@
"config": {
"id": "regex",
"options": {
"value": "master"
"value": "leader"
}
},
"fieldName": "role"
......
......@@ -153,7 +153,7 @@
],
"timeFrom": null,
"timeShift": null,
"title": "Master MNode",
"title": "Leader MNode",
"transformations": [
{
"id": "filterByValue",
......@@ -163,7 +163,7 @@
"config": {
"id": "regex",
"options": {
"value": "master"
"value": "leader"
}
},
"fieldName": "role"
......@@ -246,7 +246,7 @@
],
"timeFrom": null,
"timeShift": null,
"title": "Master MNode Create Time",
"title": "Leader MNode Create Time",
"transformations": [
{
"id": "filterByValue",
......@@ -256,7 +256,7 @@
"config": {
"id": "regex",
"options": {
"value": "master"
"value": "leader"
}
},
"fieldName": "role"
......
......@@ -274,8 +274,8 @@ Details of the metrics are as follows.
This section contains the current information and status of the cluster; the alert information is also shown here (from left to right, top to bottom).
- **First EP**: the `firstEp` setting in the current TDengine cluster.
- **Version**: TDengine server version (master mnode).
- **Master Uptime**: The time elapsed since the current Master MNode was elected as Master.
- **Version**: TDengine server version (leader mnode).
- **Leader Uptime**: The time elapsed since the current Leader MNode was elected as Leader.
- **Expire Time** - Enterprise version expiration time.
- **Used Measuring Points** - The number of measuring points used by the Enterprise Edition.
- **Databases** - The number of databases.
......@@ -333,7 +333,7 @@ Data node resource usage display with repeated multiple rows for the variable `$
2. **Has MNodes?**: whether the current dnode is a mnode.
3. **CPU Cores**: the number of CPU cores.
4. **VNodes Number**: the number of VNodes in the current dnode.
5. **VNodes Masters**: the number of vnodes in the master role.
5. **VNodes Masters**: the number of vnodes in the leader role.
6. **Current CPU Usage of taosd**: CPU usage rate of taosd processes.
7. **Current Memory Usage of taosd**: memory usage of taosd processes.
8. **Disk Used**: The total disk usage percentage of the taosd data directory.
......
......@@ -26,7 +26,6 @@ All executable files of TDengine are in the _/usr/local/taos/bin_ directory by d
- _remove.sh_: script to uninstall TDengine, please execute it carefully, link to the **rmtaos** command in the /usr/bin directory. Will remove the TDengine installation directory `/usr/local/taos`, but will keep `/etc/taos`, `/var/lib/taos`, `/var/log/taos`
- _taosadapter_: server-side executable that provides RESTful services and accepts writing requests from a variety of other software
- _tarbitrator_: provides arbitration for two-node cluster deployments
- _run_taosd_and_taosadapter.sh_: script to start both taosd and taosAdapter
- _TDinsight.sh_: script to download TDinsight and install it
- _set_core.sh_: script for setting up the system to generate core dump files for easy debugging
- _taosd-dump-cfg.gdb_: script to facilitate debugging of taosd's gdb execution.
......
......@@ -22,9 +22,9 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc
**Virtual node (vnode)**: To better support data sharding, load balancing and prevent data from overheating or skewing, data nodes are virtualized into multiple virtual nodes (vnode, V2, V3, V4, etc. in the figure). Each vnode is a relatively independent work unit, which is the basic unit of time-series data storage and has independent running threads, memory space and persistent storage path. A vnode contains a certain number of tables (data collection points). When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a data node depends on the capacity of the hardware of the physical node where the data node is located. A vnode belongs to only one DB, but a DB can have multiple vnodes. In addition to the stored time-series data, a vnode also stores the schema and tag values of the included tables. A virtual node is uniquely identified in the system by the EP of the data node and the VGroup ID to which it belongs and is created and managed by the management node.
**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in the figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 5) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The master/slave mechanism is adopted for the mnode group and the data synchronization is carried out in a strongly consistent way. Any data update operation can only be executed on the master. The creation of mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located, through internal messaging interaction.
**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in the figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 3) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The leader/follower mechanism is adopted for the mnode group and the data synchronization is carried out in a strongly consistent way. Any data update operation can only be executed on the leader. The creation of mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located, through internal messaging interaction.
**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed in a master/slave mechanism. Write operations can only be performed on the master vnode, and then replicated to slave vnodes, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `“replica”` when creating a DB, and the default is 1. Using the multi-replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. Virtual node groups are created and managed by the management node, and the management node assigns a system unique ID, aka VGroup ID. If two virtual nodes have the same vnode group ID, it means that they belong to the same group and the data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed, allowing only one, that is, no data replication. VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused.
**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed in a leader/follower mechanism. Write operations can only be performed on the leader vnode, and then replicated to follower vnodes, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `“replica”` when creating a DB, and the default is 1. Using the multi-replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. Virtual node groups are created and managed by the management node, and the management node assigns a system unique ID, aka VGroup ID. If two virtual nodes have the same vnode group ID, it means that they belong to the same group and the data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed, allowing only one, that is, no data replication. VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused.
**TAOSC**: TAOSC is the driver provided by TDengine to applications. It is responsible for dealing with the interaction between application and cluster, and provides the native interface for the C/C++ language. It is also embedded in the JDBC, C#, Python, Go, Node.js language connection libraries. Applications interact with the whole cluster through TAOSC instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; when returning the results to the application, TAOSC also needs to be responsible for the final level of aggregation, sorting, filtering and other operations. For JDBC, C/C++/C#/Python/Go/Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, TAOSC has a running instance on each dnode of TDengine cluster.
......@@ -62,13 +62,13 @@ To explain the relationship between vnode, mnode, TAOSC and application and thei
1. Application initiates a request to insert data through JDBC, ODBC, or other APIs.
2. TAOSC checks the cache to see if meta data exists for the table. If it does, it goes straight to Step 4. If not, TAOSC sends a get meta-data request to mnode.
3. Mnode returns the meta-data of the table to TAOSC. Meta-data contains the schema of the table, and also the vgroup information to which the table belongs (the vnode ID and the End Point of the dnode where the table belongs. If the number of replicas is N, there will be N groups of End Points). If TAOSC does not receive a response from the mnode for a long time, and there are multiple mnodes, TAOSC will send a request to the next mnode.
4. TAOSC initiates an insert request to master vnode.
4. TAOSC initiates an insert request to leader vnode.
5. After vnode inserts the data, it gives a reply to TAOSC, indicating that the insertion is successful. If TAOSC doesn't get a response from vnode for a long time, TAOSC will treat this node as offline. In this case, if there are multiple replicas of the inserted database, TAOSC will issue an insert request to the next vnode in vgroup.
6. TAOSC notifies APP that writing is successful.
For Steps 2 and 3, when TAOSC starts, it does not know the End Point of the mnode, so it will directly initiate a request to the configured serving End Point of the cluster. If the dnode that receives the request does not have an mnode configured, it will reply with the mnode EP list, so that TAOSC can re-issue the meta-data request to the EP of another mnode.
For Step 4 and 5, without caching, TAOSC can't recognize the master in the virtual node group, so assumes that the first vnode is the master and sends a request to it. If this vnode is not the master, it will reply to the actual master as a new target to which TAOSC shall send a request. Once a response of successful insertion is obtained, TAOSC will cache the information of master node.
For Steps 4 and 5, without caching, TAOSC can't recognize the leader in the virtual node group, so it assumes that the first vnode is the leader and sends a request to it. If this vnode is not the leader, it replies with the actual leader as a new target to which TAOSC shall send the request. Once a response of successful insertion is obtained, TAOSC will cache the information of the leader node.
The above describes the process of inserting data. The processes of querying and computing are the same. TAOSC encapsulates and hides all these complicated processes, and it is transparent to applications.
......@@ -119,65 +119,65 @@ The load balancing process does not require any manual intervention, and it is t
## Data Writing and Replication Process
If a database has N replicas, a virtual node group has N virtual nodes. But only one is the Master and all others are slaves. When the application writes a new record to system, only the Master vnode can accept the writing request. If a slave vnode receives a writing request, the system will notifies TAOSC to redirect.
If a database has N replicas, a virtual node group has N virtual nodes, but only one is the leader and all others are followers. When the application writes a new record to the system, only the leader vnode can accept the write request. If a follower vnode receives a write request, the system will notify TAOSC to redirect.
### Master vnode Writing Process
### Leader vnode Writing Process
Master Vnode uses a writing process as follows:
Leader Vnode uses a writing process as follows:
![TDengine Database Master Writing Process](write_master.webp)
<center> Figure 3: TDengine Master writing process </center>
![TDengine Database Leader Writing Process](write_master.webp)
<center> Figure 3: TDengine Leader writing process </center>
1. Master vnode receives the application data insertion request, verifies, and moves to next step;
1. Leader vnode receives the application's data insertion request, verifies it, and moves to the next step;
2. If the system configuration parameter `“walLevel”` is greater than 0, the vnode will write the original request packet into the database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will flush WAL data to disk immediately to ensure that even if the system goes down, all data can be recovered from the database log file;
3. If there are multiple replicas, vnode will forward data packet to slave vnodes in the same virtual node group, and the forwarded packet has a version number with data;
3. If there are multiple replicas, the vnode will forward the data packet to follower vnodes in the same virtual node group, and the forwarded packet carries a version number along with the data;
4. Write into memory and add the record to “skip list”;
5. Master vnode returns a confirmation message to the application, indicating a successful write.
5. Leader vnode returns a confirmation message to the application, indicating a successful write.
6. If any of Step 2, 3 or 4 fails, the error will directly return to the application.
### Slave vnode Writing Process
### Follower vnode Writing Process
For a slave vnode, the write process as follows:
For a follower vnode, the write process is as follows:
![TDengine Database Slave Writing Process](write_slave.webp)
<center> Figure 4: TDengine Slave Writing Process </center>
![TDengine Database Follower Writing Process](write_slave.webp)
<center> Figure 4: TDengine Follower Writing Process </center>
1. Slave vnode receives a data insertion request forwarded by Master vnode;
1. Follower vnode receives a data insertion request forwarded by Leader vnode;
2. If the system configuration parameter `“walLevel”` is greater than 0, the vnode will write the original request packet into the database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will flush WAL data to disk immediately to ensure that even if the system goes down, all data can be recovered from the database log file;
3. Write into memory and add the record to “skip list”.
Compared with Master vnode, slave vnode has no forwarding or reply confirmation step, means two steps less. But writing into memory and WAL is exactly the same.
Compared with the leader vnode, a follower vnode has no forwarding or reply confirmation step, which means two fewer steps. But writing into memory and WAL is exactly the same.
### Remote Disaster Recovery and IDC (Internet Data Center) Migration
As discussed above, TDengine writes using Master and Slave processes. TDengine adopts asynchronous replication for data synchronization. This method can greatly improve write performance, with no obvious impact from network delay. By configuring IDC and rack number for each physical node, it can be ensured that for a virtual node group, virtual nodes are composed of physical nodes from different IDC and different racks, thus implementing remote disaster recovery without other tools.
As discussed above, TDengine writes using Leader and Follower processes. TDengine adopts asynchronous replication for data synchronization. This method can greatly improve write performance, with no obvious impact from network delay. By configuring IDC and rack number for each physical node, it can be ensured that for a virtual node group, virtual nodes are composed of physical nodes from different IDC and different racks, thus implementing remote disaster recovery without other tools.
On the other hand, TDengine supports dynamic modification of the replica number. Once the number of replicas increases, the newly added virtual nodes will immediately enter the data synchronization process. After synchronization is complete, added virtual nodes can provide services. In the synchronization process, master and other synchronized virtual nodes keep serving. With this feature, TDengine can provide IDC migration without service interruption. It is only necessary to add new physical nodes to the existing IDC cluster, and then remove old physical nodes after the data synchronization is completed.
On the other hand, TDengine supports dynamic modification of the replica number. Once the number of replicas increases, the newly added virtual nodes will immediately enter the data synchronization process. After synchronization is complete, added virtual nodes can provide services. In the synchronization process, leader and other synchronized virtual nodes keep serving. With this feature, TDengine can provide IDC migration without service interruption. It is only necessary to add new physical nodes to the existing IDC cluster, and then remove old physical nodes after the data synchronization is completed.
However, the asynchronous replication has a very low probability scenario where data may be lost. The specific scenario is as follows:
1. Master vnode has finished its 5-step operations, confirmed the success of writing to APP, and then goes down;
2. Slave vnode receives the write request, then processing fails before writing to the log in Step 2;
3. Slave vnode will become the new master, thus losing one record.
1. Leader vnode has finished its 5-step operations, confirmed the success of writing to APP, and then goes down;
2. Follower vnode receives the write request, but processing fails before writing to the log in Step 2;
3. Follower vnode will become the new leader, thus losing one record.
In theory, for asynchronous replication, there is no guarantee to prevent data loss. However, this is an extremely low probability scenario as described above.
Note: Remote disaster recovery and no-downtime IDC migration are only supported by Enterprise Edition. **Hint: This function is not available yet**
### Master/slave Selection
### Leader/follower Selection
Vnode maintains a version number. When memory data is persisted, the version number will also be persisted. For each data update operation, whether it is time-series data or metadata, this version number will be increased by one.
When a vnode starts, the roles (master, slave) are uncertain, and the data is in an unsynchronized state. It’s necessary to establish TCP connections with other nodes in the virtual node group and exchange status, including version and its own roles. Through the exchange, the system implements a master-selection process. The rules are as follows:
When a vnode starts, the roles (leader, follower) are uncertain, and the data is in an unsynchronized state. It’s necessary to establish TCP connections with other nodes in the virtual node group and exchange status, including version and its own roles. Through the exchange, the system implements a leader-selection process. The rules are as follows:
1. If there’s only one replica, it’s always master
2. When all replicas are online, the one with latest version is master
3. Over half of online nodes are virtual nodes, and some virtual node is slave, it will automatically become master
4. For 2 and 3, if multiple virtual nodes meet the requirement, the first vnode in virtual node group list will be selected as master.
1. If there’s only one replica, it’s always the leader
2. When all replicas are online, the one with the latest version is the leader
3. If more than half of the virtual nodes are online and some virtual node is a follower, it will automatically become the leader
4. For 2 and 3, if multiple virtual nodes meet the requirement, the first vnode in the virtual node group list will be selected as the leader.
### Synchronous Replication
For scenarios with strong data consistency requirements, asynchronous data replication is not applicable, because there is a small probability of data loss. So, TDengine provides a synchronous replication mechanism for users. When creating a database, in addition to specifying the number of replicas, user also needs to specify a new parameter “quorum”. If quorum is greater than one, it means that every time the Master forwards a message to the replica, it needs to wait for “quorum-1” reply confirms before informing the application that data has been successfully written in slave. If “quorum-1” reply confirms are not received within a certain period of time, the master vnode will return an error to the application.
For scenarios with strong data consistency requirements, asynchronous data replication is not applicable, because there is a small probability of data loss. So, TDengine provides a synchronous replication mechanism for users. When creating a database, in addition to specifying the number of replicas, the user also needs to specify a new parameter “quorum”. If quorum is greater than one, every time the leader forwards a message to the replicas, it needs to wait for “quorum-1” confirmation replies before informing the application that the data has been successfully written on the followers. If “quorum-1” confirmation replies are not received within a certain period of time, the leader vnode will return an error to the application.
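As a worked example of the quorum parameter described above (a hedged sketch using the 2.x-style `REPLICA`/`QUORUM` database options and the `taos -s` command-line flag; adapt to the syntax of your version):

```bash
# Create a 3-replica database that waits for quorum-1 = 1 follower confirmation,
# i.e. leader plus one follower, before acknowledging a write to the application.
taos -s "CREATE DATABASE demo REPLICA 3 QUORUM 2;"
```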
With synchronous replication, the performance of the system will decrease and latency will increase. Because metadata needs strong consistency, the default for data synchronization between mnodes is synchronous replication.
......
......@@ -109,7 +109,7 @@ taos>
It is also possible to access the REST interface provided by TDengine in the container from the host.
```
curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql
curl -L -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql
```
Output is like below:
......@@ -147,7 +147,7 @@ docker run -d --name tdengine-taosd -p 6030-6042:6030-6042 -p 6030-6042:6030-604
- Verify the REST interface:
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' 127.0.0.1:6041/rest/sql
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" 127.0.0.1:6041/rest/sql
```
Below is an example output:
......
......@@ -21,7 +21,7 @@ RESTful 接口不依赖于任何 TDengine 的库,因此客户端不需要安
下面示例是列出所有的数据库,请把 h1.taosdata.com 和 6041(缺省值)替换为实际运行的 TDengine 服务 FQDN 和端口号:
```html
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' h1.taosdata.com:6041/rest/sql
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" h1.taosdata.com:6041/rest/sql
```
返回值结果如下表示验证通过:
......@@ -106,13 +106,13 @@ HTTP 请求的 BODY 里就是一个完整的 SQL 语句,SQL 语句中的数据
使用 `curl` 通过自定义身份认证方式来发起一个 HTTP Request,语法如下:
```bash
curl -H 'Authorization: Basic <TOKEN>' -d '<SQL>' <ip>:<PORT>/rest/sql/[db_name]
curl -L -H "Authorization: Basic <TOKEN>" -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name]
```
或者
```bash
curl -u username:password -d '<SQL>' <ip>:<PORT>/rest/sql/[db_name]
curl -L -u username:password -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name]
```
其中,`TOKEN` 为 `{username}:{password}` 经过 Base64 编码之后的字符串,例如 `root:taosdata` 编码后为 `cm9vdDp0YW9zZGF0YQ==`
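For example, the token can be generated with the standard `base64` utility (a small sketch; `-n` avoids encoding a trailing newline):

```bash
TOKEN=$(echo -n 'root:taosdata' | base64)
echo "$TOKEN"    # cm9vdDp0YW9zZGF0YQ==
curl -L -H "Authorization: Basic $TOKEN" -d "show databases;" <ip>:<PORT>/rest/sql
```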
......@@ -192,7 +192,7 @@ curl http://192.168.0.1:6041/rest/login/root/taosdata
- 在 demo 库里查询表 d1001 的所有记录:
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001' 192.168.0.1:6041/rest/sql
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sql
```
返回值:
......@@ -218,7 +218,7 @@ curl http://192.168.0.1:6041/rest/login/root/taosdata
- 创建库 demo:
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 192.168.0.1:6041/rest/sql
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "create database demo" 192.168.0.1:6041/rest/sql
```
返回值:
......@@ -240,7 +240,7 @@ curl http://192.168.0.1:6041/rest/login/root/taosdata
HTTP 请求 URL 采用 `/rest/sqlt` 时,返回结果集的时间戳将采用 Unix 时间戳格式表示,例如
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001' 192.168.0.1:6041/rest/sqlt
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sqlt
```
返回结果:
......@@ -268,7 +268,7 @@ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001
HTTP 请求 URL 采用 `/rest/sqlutc` 时,返回结果集的时间戳将采用 UTC 时间字符串表示,例如
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.t1' 192.168.0.1:6041/rest/sqlutc
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.t1" 192.168.0.1:6041/rest/sqlutc
```
返回值:
......
......@@ -26,7 +26,6 @@ TDengine 的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下
- _remove.sh_:卸载 TDengine 的脚本,请谨慎执行,链接到/usr/bin 目录下的**rmtaos**命令。会删除 TDengine 的安装目录/usr/local/taos,但会保留/etc/taos、/var/lib/taos、/var/log/taos
- _taosadapter_: 提供 RESTful 服务和接受其他多种软件写入请求的服务端可执行文件
- _tarbitrator_: 提供双节点集群部署的仲裁功能
- _run_taosd_and_taosadapter.sh_:同时启动 taosd 和 taosAdapter 的脚本
- _TDinsight.sh_:用于下载 TDinsight 并安装的脚本
- _set_core.sh_:用于方便调试设置系统生成 core dump 文件的脚本
- _taosd-dump-cfg.gdb_:用于方便调试 taosd 的 gdb 执行脚本。
......
......@@ -23,7 +23,7 @@ TDengine 分布式架构的逻辑结构图如下:
**虚拟节点(vnode):** 为更好的支持数据分片、负载均衡,防止数据过热或倾斜,数据节点被虚拟化成多个虚拟节点(vnode,图中 V2,V3,V4 等)。每个 vnode 都是一个相对独立的工作单元,是时序数据存储的基本单元,具有独立的运行线程、内存空间与持久化存储的路径。一个 vnode 包含一定数量的表(数据采集点)。当创建一张新表时,系统会检查是否需要创建新的 vnode。一个数据节点上能创建的 vnode 的数量取决于该数据节点所在物理节点的硬件资源。一个 vnode 只属于一个 DB,但一个 DB 可以有多个 vnode。一个 vnode 除存储的时序数据外,也保存有所包含的表的 schema、标签值等。一个虚拟节点由所属的数据节点的 EP,以及所属的 VGroup ID 在系统内唯一标识,由管理节点创建并管理。
**管理节点(mnode):** 一个虚拟的逻辑单元,负责所有数据节点运行状态的监控和维护,以及节点之间的负载均衡(图中 M)。同时,管理节点也负责元数据(包括用户、数据库、表、静态标签等)的存储和管理,因此也称为 Meta Node。TDengine 集群中可配置多个(开源版最多不超过 3 个)mnode,它们自动构建成为一个虚拟管理节点组(图中 M0,M1,M2)。mnode 间采用 master/slave 的机制进行管理,而且采取强一致方式进行数据同步,任何数据更新操作只能在 Master 上进行。mnode 集群的创建由系统自动完成,无需人工干预。每个 dnode 上至多有一个 mnode,由所属的数据节点的 EP 来唯一标识。每个 dnode 通过内部消息交互自动获取整个集群中所有 mnode 所在的 dnode 的 EP。
**管理节点(mnode):** 一个虚拟的逻辑单元,负责所有数据节点运行状态的监控和维护,以及节点之间的负载均衡(图中 M)。同时,管理节点也负责元数据(包括用户、数据库、表、静态标签等)的存储和管理,因此也称为 Meta Node。TDengine 集群中可配置多个(最多不超过 3 个)mnode,它们自动构建成为一个虚拟管理节点组(图中 M0,M1,M2)。mnode 间采用 master/slave 的机制进行管理,而且采取强一致方式进行数据同步,任何数据更新操作只能在 Master 上进行。mnode 集群的创建由系统自动完成,无需人工干预。每个 dnode 上至多有一个 mnode,由所属的数据节点的 EP 来唯一标识。每个 dnode 通过内部消息交互自动获取整个集群中所有 mnode 所在的 dnode 的 EP。
**虚拟节点组(VGroup):** 不同数据节点上的 vnode 可以组成一个虚拟节点组(vgroup)来保证系统的高可靠。虚拟节点组内采取 master/slave 的方式进行管理。写操作只能在 master vnode 上进行,系统采用异步复制的方式将数据同步到 slave vnode,这样确保了一份数据在多个物理节点上有拷贝。一个 vgroup 里虚拟节点个数就是数据的副本数。如果一个 DB 的副本数为 N,系统必须有至少 N 数据节点。副本数在创建 DB 时通过参数 replica 可以指定,缺省为 1。使用 TDengine 的多副本特性,可以不再需要昂贵的磁盘阵列等存储设备,就可以获得同样的数据高可靠性。虚拟节点组由管理节点创建、管理,并且由管理节点分配一个系统唯一的 ID,VGroup ID。如果两个虚拟节点的 VGroup ID 相同,说明他们属于同一个组,数据互为备份。虚拟节点组里虚拟节点的个数是可以动态改变的,容许只有一个,也就是没有数据复制。VGroup ID 是永远不变的,即使一个虚拟节点组被删除,它的 ID 也不会被收回重复利用。
......
......@@ -108,7 +108,7 @@ taos>
也可以在宿主机使用 curl 通过 RESTful 端口访问 Docker 容器内的 TDengine server。
```
curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql
curl -L -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql
```
输出示例如下:
......@@ -148,7 +148,7 @@ docker run -d --name tdengine-taosd -p 6030-6042:6030-6042 -p 6030-6042:6030-604
使用 curl 命令验证 RESTful 接口可以正常工作:
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' 127.0.0.1:6041/rest/sql
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" 127.0.0.1:6041/rest/sql
```
输出示例如下:
......
......@@ -32,12 +32,14 @@ int32_t init_env() {
}
taos_free_result(pRes);
#if 0
pRes = taos_query(pConn, "create database if not exists abc2 vgroups 20");
if (taos_errno(pRes) != 0) {
printf("error in create db, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
#endif
pRes = taos_query(pConn, "use abc1");
if (taos_errno(pRes) != 0) {
......@@ -66,6 +68,14 @@ int32_t init_env() {
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create table if not exists tu3 using st1 tags(3)");
if (taos_errno(pRes) != 0) {
printf("failed to create child table tu3, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
return 0;
}
......@@ -89,8 +99,8 @@ int32_t create_stream() {
/*const char* sql = "select sum(k) from tu1 interval(10m)";*/
/*pRes = tmq_create_stream(pConn, "stream1", "out1", sql);*/
pRes = taos_query(pConn,
"create stream stream1 trigger at_once into abc2.outstb as select _wstartts, sum(k) from st1 "
"partition by tbname interval(10m) ");
"create stream stream1 trigger at_once into outstb as select _wstartts, sum(k) from st1 partition "
"by tbname interval(10s) ");
if (taos_errno(pRes) != 0) {
printf("failed to create stream stream1, reason:%s\n", taos_errstr(pRes));
return -1;
......@@ -107,11 +117,4 @@ int main(int argc, char* argv[]) {
code = init_env();
}
create_stream();
#if 0
tmq_t* tmq = build_consumer();
tmq_list_t* topic_list = build_topic_list();
/*perf_loop(tmq, topic_list);*/
/*basic_consume_loop(tmq, topic_list);*/
sync_consume_loop(tmq, topic_list);
#endif
}
......@@ -26,6 +26,14 @@ static void msg_process(TAOS_RES* msg) {
printf("topic: %s\n", tmq_get_topic_name(msg));
printf("db: %s\n", tmq_get_db_name(msg));
printf("vg: %d\n", tmq_get_vgroup_id(msg));
if (tmq_get_res_type(msg) == TMQ_RES_TABLE_META) {
void* meta;
int32_t metaLen;
tmq_get_raw_meta(msg, &meta, &metaLen);
printf("meta, len is %d\n", metaLen);
return;
}
while (1) {
TAOS_ROW row = taos_fetch_row(msg);
if (row == NULL) break;
......@@ -76,19 +84,41 @@ int32_t init_env() {
}
taos_free_result(pRes);
pRes = taos_query(pConn, "insert into ct0 values(now, 1, 2, 'a')");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ct0, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create table if not exists ct1 using st1 tags(2000)");
if (taos_errno(pRes) != 0) {
printf("failed to create child table tu2, reason:%s\n", taos_errstr(pRes));
printf("failed to create child table ct1, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "insert into ct1 values(now, 3, 4, 'b')");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ct1, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create table if not exists ct3 using st1 tags(3000)");
if (taos_errno(pRes) != 0) {
printf("failed to create child table tu3, reason:%s\n", taos_errstr(pRes));
printf("failed to create child table ct3, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "insert into ct3 values(now, 5, 6, 'c')");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
return 0;
}
......@@ -107,8 +137,8 @@ int32_t create_topic() {
}
taos_free_result(pRes);
/*pRes = taos_query(pConn, "create topic topic_ctb_column as database abc1");*/
pRes = taos_query(pConn, "create topic topic_ctb_column as select ts, c1, c2, c3 from st1");
pRes = taos_query(pConn, "create topic topic_ctb_column with meta as database abc1");
/*pRes = taos_query(pConn, "create topic topic_ctb_column as select ts, c1, c2, c3 from st1");*/
if (taos_errno(pRes) != 0) {
printf("failed to create topic topic_ctb_column, reason:%s\n", taos_errstr(pRes));
return -1;
......@@ -168,6 +198,9 @@ tmq_t* build_consumer() {
tmq_conf_set(conf, "td.connect.pass", "taosdata");
tmq_conf_set(conf, "msg.with.table.name", "true");
tmq_conf_set(conf, "enable.auto.commit", "true");
tmq_conf_set(conf, "experiment.use.snapshot", "false");
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
assert(tmq);
......
......@@ -187,8 +187,8 @@ DLL_EXPORT TAOS_ROW *taos_result_block(TAOS_RES *res);
DLL_EXPORT const char *taos_get_server_info(TAOS *taos);
DLL_EXPORT const char *taos_get_client_info();
DLL_EXPORT const char *taos_errstr(TAOS_RES *tres);
DLL_EXPORT int taos_errno(TAOS_RES *tres);
DLL_EXPORT const char *taos_errstr(TAOS_RES *res);
DLL_EXPORT int taos_errno(TAOS_RES *res);
DLL_EXPORT void taos_query_a(TAOS *taos, const char *sql, __taos_async_fn_t fp, void *param);
DLL_EXPORT void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, void *param);
......@@ -252,6 +252,16 @@ DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_comm
/* -------------------------TMQ MSG HANDLE INTERFACE---------------------- */
enum tmq_res_t {
TMQ_RES_INVALID = -1,
TMQ_RES_DATA = 1,
TMQ_RES_TABLE_META = 2,
};
typedef enum tmq_res_t tmq_res_t;
DLL_EXPORT tmq_res_t tmq_get_res_type(TAOS_RES *res);
DLL_EXPORT int32_t tmq_get_raw_meta(TAOS_RES *res, void **raw_meta, int32_t *raw_meta_len);
DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res);
......
......@@ -41,6 +41,7 @@ extern "C" {
#define TSDB_INS_TABLE_VGROUPS "vgroups"
#define TSDB_INS_TABLE_VNODES "vnodes"
#define TSDB_INS_TABLE_CONFIGS "configs"
#define TSDB_INS_TABLE_DNODE_VARIABLES "dnode_variables"
#define TSDB_PERFORMANCE_SCHEMA_DB "performance_schema"
#define TSDB_PERFS_TABLE_SMAS "smas"
......
......@@ -34,6 +34,7 @@ enum {
enum {
TMQ_MSG_TYPE__DUMMY = 0,
TMQ_MSG_TYPE__POLL_RSP,
TMQ_MSG_TYPE__POLL_META_RSP,
TMQ_MSG_TYPE__EP_RSP,
TMQ_MSG_TYPE__END_RSP,
};
......@@ -41,14 +42,23 @@ enum {
typedef enum EStreamType {
STREAM_NORMAL = 1,
STREAM_INVERT,
STREAM_REPROCESS,
STREAM_CLEAR,
STREAM_INVALID,
STREAM_GET_ALL,
STREAM_DELETE,
STREAM_RETRIEVE,
STREAM_PUSH_DATA,
STREAM_PUSH_EMPTY,
} EStreamType;
typedef struct {
SArray* pGroupList;
SArray* pTableList;
SHashObj* map; // speedup acquire the tableQueryInfo by table uid
bool needSortTableByGroupId;
void* pTagCond;
void* pTagIndexCond;
uint64_t suid;
} STableListInfo;
typedef struct SColumnDataAgg {
......@@ -63,14 +73,13 @@ typedef struct SColumnDataAgg {
typedef struct SDataBlockInfo {
STimeWindow window;
int32_t rows;
int32_t rows; // todo hide this attribute
int32_t rowSize;
int64_t uid; // the uid of table, from which current data block comes
int64_t blockId; // block id, generated by physical planner
uint64_t uid; // the uid of table, from which current data block comes
uint16_t blockId; // block id, generated by physical planner
uint64_t groupId; // no need to serialize
int16_t numOfCols;
int16_t hasVarCol;
int32_t capacity;
uint32_t capacity;
// TODO: optimize and remove following
int32_t childId; // used for stream, do not serialize
EStreamType type; // used for stream, do not serialize
......@@ -91,7 +100,7 @@ typedef struct SVarColAttr {
// pBlockAgg->numOfNull == info.rows, all data are null
// pBlockAgg->numOfNull == 0, no data are null.
typedef struct SColumnInfoData {
SColumnInfo info; // TODO filter info needs to be removed
SColumnInfo info; // column info
bool hasNull; // if current column data has null value.
char* pData; // the corresponding block data in memory
union {
......@@ -108,10 +117,10 @@ typedef struct SQueryTableDataCond {
int32_t type; // data block load type:
int32_t numOfTWindows;
STimeWindow* twindows;
int64_t startVersion;
int64_t endVersion;
int32_t numOfTables; // number of tables
uint64_t* uidList; // table uid list
int64_t startVersion; // start version
int64_t endVersion; // end version
} SQueryTableDataCond;
void* blockDataDestroy(SSDataBlock* pBlock);
......
......@@ -71,7 +71,8 @@ SEpSet getEpSet_s(SCorEpSet* pEpSet);
#define colDataGetData(p1_, r_) \
((IS_VAR_DATA_TYPE((p1_)->info.type)) ? colDataGetVarData(p1_, r_) : colDataGetNumData(p1_, r_))
#define IS_JSON_NULL(type, data) ((type) == TSDB_DATA_TYPE_JSON && *(data) == TSDB_DATA_TYPE_NULL)
#define IS_JSON_NULL(type, data) \
((type) == TSDB_DATA_TYPE_JSON && (*(data) == TSDB_DATA_TYPE_NULL || tTagIsJsonNull(data)))
static FORCE_INLINE bool colDataIsNull_s(const SColumnInfoData* pColumnInfoData, uint32_t row) {
if (!pColumnInfoData->hasNull) {
......@@ -183,9 +184,10 @@ static FORCE_INLINE void colDataAppendDouble(SColumnInfoData* pColumnInfoData, u
int32_t getJsonValueLen(const char* data);
int32_t colDataAppend(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData, bool isNull);
int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, uint32_t numOfRow1, int32_t* capacity,
int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, uint32_t numOfRow1, uint32_t* capacity,
const SColumnInfoData* pSource, uint32_t numOfRow2);
int32_t colDataAssign(SColumnInfoData* pColumnInfoData, const SColumnInfoData* pSource, int32_t numOfRows);
int32_t colDataAssign(SColumnInfoData* pColumnInfoData, const SColumnInfoData* pSource, int32_t numOfRows,
const SDataBlockInfo* pBlockInfo);
int32_t blockDataUpdateTsWindow(SSDataBlock* pDataBlock, int32_t tsColumnIndex);
int32_t colDataGetLength(const SColumnInfoData* pColumnInfoData, int32_t numOfRows);
......@@ -211,7 +213,7 @@ size_t blockDataGetSerialMetaSize(uint32_t numOfCols);
int32_t blockDataSort(SSDataBlock* pDataBlock, SArray* pOrderInfo);
int32_t blockDataSort_rv(SSDataBlock* pDataBlock, SArray* pOrderInfo, bool nullFirst);
int32_t colInfoDataEnsureCapacity(SColumnInfoData* pColumn, size_t existRows, uint32_t numOfRows);
int32_t colInfoDataEnsureCapacity(SColumnInfoData* pColumn, uint32_t numOfRows);
int32_t blockDataEnsureCapacity(SSDataBlock* pDataBlock, uint32_t numOfRows);
void colInfoDataCleanup(SColumnInfoData* pColumn, uint32_t numOfRows);
......@@ -221,13 +223,22 @@ size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize);
int32_t blockDataTrimFirstNRows(SSDataBlock* pBlock, size_t n);
int32_t assignOneDataBlock(SSDataBlock* dst, const SSDataBlock* src);
int32_t copyDataBlock(SSDataBlock* dst, const SSDataBlock* src);
SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData);
SSDataBlock* createDataBlock();
int32_t blockDataAppendColInfo(SSDataBlock* pBlock, SColumnInfoData* pColInfoData);
SColumnInfoData createColumnInfoData(int16_t type, int32_t bytes, int16_t colId);
SColumnInfoData* bdGetColumnInfoData(SSDataBlock* pBlock, int32_t index);
void blockCompressEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_t numOfCols,
int8_t needCompress);
const char* blockCompressDecode(SSDataBlock* pBlock, int32_t numOfCols, int32_t numOfRows, const char* pData);
void blockDebugShowData(const SArray* dataBlocks, const char* flag);
void blockDebugShowDataBlocks(const SArray* dataBlocks, const char* flag);
// for debug
char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** dumpBuf);
int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks, STSchema* pTSchema, int32_t vgId,
tb_uid_t suid);
......@@ -235,7 +246,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks
char* buildCtbNameByGroupId(const char* stbName, uint64_t groupId);
static FORCE_INLINE int32_t blockGetEncodeSize(const SSDataBlock* pBlock) {
return blockDataGetSerialMetaSize(pBlock->info.numOfCols) + blockDataGetSize(pBlock);
return blockDataGetSerialMetaSize(taosArrayGetSize(pBlock->pDataBlock)) + blockDataGetSize(pBlock);
}
static FORCE_INLINE int32_t blockCompressColData(SColumnInfoData* pColRes, int32_t numOfRows, char* data,
......
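A hedged sketch of how the updated blockGetEncodeSize pairs with blockCompressEncode: size an output buffer from the block itself, then serialize into it. taosMemoryMalloc is assumed from the OS layer, and the column count is taken from pDataBlock exactly as in the new inline function above.

/* Hedged sketch: serialize an SSDataBlock using the helpers declared above. */
static char* encodeDataBlock(const SSDataBlock* pBlock, int32_t* pActualLen) {
  int32_t cap = blockGetEncodeSize(pBlock);   /* upper bound on the encoded size */
  char*   buf = taosMemoryMalloc(cap);
  if (buf == NULL) return NULL;
  int32_t numOfCols = (int32_t)taosArrayGetSize(pBlock->pDataBlock);
  blockCompressEncode(pBlock, buf, pActualLen, numOfCols, 0 /* no compression */);
  return buf;  /* caller frees; *pActualLen holds the encoded length */
}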
......@@ -79,13 +79,14 @@ int32_t tGetTSRow(uint8_t *p, STSRow2 *pRow);
// STag
int32_t tTagNew(SArray *pArray, int32_t version, int8_t isJson, STag **ppTag);
void tTagFree(STag *pTag);
bool tTagIsJson(const void *pTag);
bool tTagIsJsonNull(void *tagVal);
bool tTagGet(const STag *pTag, STagVal *pTagVal);
char *tTagValToData(const STagVal *pTagVal, bool isJson);
int32_t tEncodeTag(SEncoder *pEncoder, const STag *pTag);
int32_t tDecodeTag(SDecoder *pDecoder, STag **ppTag);
int32_t tTagToValArray(const STag *pTag, SArray **ppArray);
void debugPrintSTag(STag *pTag, const char *tag, int32_t ln); // TODO: remove
void debugCheckTags(STag *pTag); // TODO: remove
// STRUCT =================
struct STColumn {
......
......@@ -137,6 +137,8 @@ extern bool tsSmlDataFormat;
// internal
extern int32_t tsTransPullupInterval;
extern int32_t tsMqRebalanceInterval;
extern int32_t tsTtlUnit;
extern int32_t tsTtlPushInterval;
#define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize)
......@@ -149,6 +151,7 @@ void taosCfgDynamicOptions(const char *option, const char *value);
void taosAddDataDir(int32_t index, char *v1, int32_t level, int32_t primary);
struct SConfig *taosGetCfg();
int32_t taosSetCfg(SConfig *pCfg, char* name);
#ifdef __cplusplus
}
......
......@@ -168,7 +168,7 @@ typedef struct {
int32_t vgId;
char* dbFName;
char* tbName;
} SBuildTableMetaInput;
} SBuildTableInput;
typedef struct {
char db[TSDB_DB_FNAME_LEN];
......@@ -444,6 +444,7 @@ typedef struct {
char* comment;
char* pAst1;
char* pAst2;
SArray* pFuncs;
} SMCreateStbReq;
int32_t tSerializeSMCreateStbReq(void* buf, int32_t bufLen, SMCreateStbReq* pReq);
......@@ -509,7 +510,8 @@ typedef struct {
int8_t superUser;
int8_t connType;
SEpSet epSet;
char sVersion[128];
char sVer[TSDB_VERSION_LEN];
char sDetailVer[128];
} SConnectRsp;
int32_t tSerializeSConnectRsp(void* buf, int32_t bufLen, SConnectRsp* pRsp);
......@@ -628,11 +630,6 @@ typedef struct {
uint8_t scale;
} SColumnInfo;
typedef struct {
int64_t uid;
TSKEY key; // last accessed ts, for subscription
} STableIdInfo;
typedef struct STimeWindow {
TSKEY skey;
TSKEY ekey;
......@@ -667,6 +664,41 @@ int32_t tSerializeSQueryTableRsp(void* buf, int32_t bufLen, SQueryTableRsp* pRsp
int32_t tDeserializeSQueryTableRsp(void* buf, int32_t bufLen, SQueryTableRsp* pRsp);
typedef struct {
SMsgHead header;
char dbFName[TSDB_DB_FNAME_LEN];
char tbName[TSDB_TABLE_NAME_LEN];
} STableCfgReq;
typedef struct {
char tbName[TSDB_TABLE_NAME_LEN];
char stbName[TSDB_TABLE_NAME_LEN];
char dbFName[TSDB_DB_FNAME_LEN];
int32_t numOfTags;
int32_t numOfColumns;
int8_t tableType;
int64_t delay1;
int64_t delay2;
int64_t watermark1;
int64_t watermark2;
int32_t ttl;
SArray* pFuncs;
int32_t commentLen;
char* pComment;
SSchema* pSchemas;
int32_t tagsLen;
char* pTags;
} STableCfg;
typedef STableCfg STableCfgRsp;
int32_t tSerializeSTableCfgReq(void* buf, int32_t bufLen, STableCfgReq* pReq);
int32_t tDeserializeSTableCfgReq(void* buf, int32_t bufLen, STableCfgReq* pReq);
int32_t tSerializeSTableCfgRsp(void* buf, int32_t bufLen, STableCfgRsp* pRsp);
int32_t tDeserializeSTableCfgRsp(void* buf, int32_t bufLen, STableCfgRsp* pRsp);
void tFreeSTableCfgRsp(STableCfgRsp* pRsp);
typedef struct {
char db[TSDB_DB_FNAME_LEN];
int32_t numOfVgroups;
......@@ -798,6 +830,27 @@ typedef struct {
int32_t tSerializeSQnodeListReq(void* buf, int32_t bufLen, SQnodeListReq* pReq);
int32_t tDeserializeSQnodeListReq(void* buf, int32_t bufLen, SQnodeListReq* pReq);
typedef struct {
int32_t rowNum;
} SDnodeListReq;
int32_t tSerializeSDnodeListReq(void* buf, int32_t bufLen, SDnodeListReq* pReq);
int32_t tDeserializeSDnodeListReq(void* buf, int32_t bufLen, SDnodeListReq* pReq);
typedef struct {
int32_t useless; // useless
} SServerVerReq;
int32_t tSerializeSServerVerReq(void* buf, int32_t bufLen, SServerVerReq* pReq);
int32_t tDeserializeSServerVerReq(void* buf, int32_t bufLen, SServerVerReq* pReq);
typedef struct {
char ver[TSDB_VERSION_LEN];
} SServerVerRsp;
int32_t tSerializeSServerVerRsp(void* buf, int32_t bufLen, SServerVerRsp* pRsp);
int32_t tDeserializeSServerVerRsp(void* buf, int32_t bufLen, SServerVerRsp* pRsp);
typedef struct SQueryNodeAddr {
int32_t nodeId; // vgId or qnodeId
SEpSet epSet;
......@@ -816,6 +869,14 @@ int32_t tSerializeSQnodeListRsp(void* buf, int32_t bufLen, SQnodeListRsp* pRsp);
int32_t tDeserializeSQnodeListRsp(void* buf, int32_t bufLen, SQnodeListRsp* pRsp);
void tFreeSQnodeListRsp(SQnodeListRsp* pRsp);
typedef struct {
SArray* dnodeList; // SArray<SEpSet>
} SDnodeListRsp;
int32_t tSerializeSDnodeListRsp(void* buf, int32_t bufLen, SDnodeListRsp* pRsp);
int32_t tDeserializeSDnodeListRsp(void* buf, int32_t bufLen, SDnodeListRsp* pRsp);
void tFreeSDnodeListRsp(SDnodeListRsp* pRsp);
typedef struct {
SArray* pArray; // Array of SUseDbRsp
} SUseDbBatchRsp;
......@@ -1174,6 +1235,27 @@ typedef struct {
char* data;
} STagData;
typedef struct {
int32_t useless; // useless
} SShowVariablesReq;
int32_t tSerializeSShowVariablesReq(void* buf, int32_t bufLen, SShowVariablesReq* pReq);
int32_t tDeserializeSShowVariablesReq(void* buf, int32_t bufLen, SShowVariablesReq* pReq);
typedef struct {
char name[TSDB_CONFIG_OPTION_LEN + 1];
char value[TSDB_CONFIG_VALUE_LEN + 1];
} SVariablesInfo;
typedef struct {
SArray* variables; // SArray<SVariablesInfo>
} SShowVariablesRsp;
int32_t tSerializeSShowVariablesRsp(void* buf, int32_t bufLen, SShowVariablesRsp* pReq);
int32_t tDeserializeSShowVariablesRsp(void* buf, int32_t bufLen, SShowVariablesRsp* pReq);
void tFreeSShowVariablesRsp(SShowVariablesRsp* pRsp);
/*
* sql: show tables like '%a_%'
* payload is the query condition, e.g., '%a_%'
......@@ -1202,6 +1284,7 @@ void tFreeSShowRsp(SShowRsp* pRsp);
typedef struct {
char db[TSDB_DB_FNAME_LEN];
char tb[TSDB_TABLE_NAME_LEN];
char user[TSDB_USER_LEN];
int64_t showId;
} SRetrieveTableReq;
......@@ -1223,6 +1306,8 @@ typedef struct {
int32_t compLen;
int32_t numOfRows;
int32_t numOfCols;
int64_t skey;
int64_t ekey;
char data[];
} SRetrieveTableRsp;
......@@ -1286,11 +1371,19 @@ typedef struct {
int32_t dnodeId;
char config[TSDB_DNODE_CONFIG_LEN];
char value[TSDB_DNODE_VALUE_LEN];
} SMCfgDnodeReq, SDCfgDnodeReq;
} SMCfgDnodeReq;
int32_t tSerializeSMCfgDnodeReq(void* buf, int32_t bufLen, SMCfgDnodeReq* pReq);
int32_t tDeserializeSMCfgDnodeReq(void* buf, int32_t bufLen, SMCfgDnodeReq* pReq);
typedef struct {
char config[TSDB_DNODE_CONFIG_LEN];
char value[TSDB_DNODE_VALUE_LEN];
} SDCfgDnodeReq;
int32_t tSerializeSDCfgDnodeReq(void* buf, int32_t bufLen, SDCfgDnodeReq* pReq);
int32_t tDeserializeSDCfgDnodeReq(void* buf, int32_t bufLen, SDCfgDnodeReq* pReq);
typedef struct {
int32_t dnodeId;
} SMCreateMnodeReq, SMDropMnodeReq, SDDropMnodeReq, SMCreateQnodeReq, SMDropQnodeReq, SDCreateQnodeReq, SDDropQnodeReq,
......@@ -1317,7 +1410,7 @@ int32_t tSerializeSSetStandbyReq(void* buf, int32_t bufLen, SSetStandbyReq* pReq
int32_t tDeserializeSSetStandbyReq(void* buf, int32_t bufLen, SSetStandbyReq* pReq);
typedef struct {
char queryStrId[TSDB_QUERY_ID_LEN];
char queryStrId[TSDB_QUERY_ID_LEN];
} SKillQueryReq;
int32_t tSerializeSKillQueryReq(void* buf, int32_t bufLen, SKillQueryReq* pReq);
......@@ -1560,6 +1653,7 @@ typedef struct {
char name[TSDB_TOPIC_FNAME_LEN]; // accout.topic
int8_t igExists;
int8_t subType;
int8_t withMeta;
char* sql;
char subDbName[TSDB_DB_FNAME_LEN];
union {
......@@ -1776,12 +1870,10 @@ typedef struct {
} SDDropTopicReq;
typedef struct {
float xFilesFactor;
int32_t delay;
int32_t qmsg1Len;
int32_t qmsg2Len;
char* qmsg1; // pAst1:qmsg1:SRetention1 => trigger aggr task1
char* qmsg2; // pAst2:qmsg2:SRetention2 => trigger aggr task2
int64_t maxdelay[2];
int64_t watermark[2];
int32_t qmsgLen[2];
char* qmsg[2]; // pAst:qmsg:SRetention => trigger aggr task1/2
} SRSmaParam;
int32_t tEncodeSRSmaParam(SEncoder* pCoder, const SRSmaParam* pRSmaParam);
......@@ -1794,7 +1886,7 @@ typedef struct SVCreateStbReq {
int8_t rollup;
SSchemaWrapper schemaRow;
SSchemaWrapper schemaTag;
SRSmaParam pRSmaParam;
SRSmaParam rsmaParam;
} SVCreateStbReq;
int tEncodeSVCreateStbReq(SEncoder* pCoder, const SVCreateStbReq* pReq);
......@@ -2284,6 +2376,9 @@ typedef struct {
int8_t igNotExists;
} SMDropStreamReq;
int32_t tSerializeSMDropStreamReq(void* buf, int32_t bufLen, const SMDropStreamReq* pReq);
int32_t tDeserializeSMDropStreamReq(void* buf, int32_t bufLen, SMDropStreamReq* pReq);
typedef struct {
int8_t reserved;
} SMDropStreamRsp;
......@@ -2305,6 +2400,7 @@ typedef struct {
int64_t newConsumerId;
char subKey[TSDB_SUBSCRIBE_KEY_LEN];
int8_t subType;
int8_t withMeta;
char* qmsg;
int64_t suid;
} SMqRebVgReq;
......@@ -2317,6 +2413,7 @@ static FORCE_INLINE int32_t tEncodeSMqRebVgReq(void** buf, const SMqRebVgReq* pR
tlen += taosEncodeFixedI64(buf, pReq->newConsumerId);
tlen += taosEncodeString(buf, pReq->subKey);
tlen += taosEncodeFixedI8(buf, pReq->subType);
tlen += taosEncodeFixedI8(buf, pReq->withMeta);
if (pReq->subType == TOPIC_SUB_TYPE__COLUMN) {
tlen += taosEncodeString(buf, pReq->qmsg);
} else if (pReq->subType == TOPIC_SUB_TYPE__TABLE) {
......@@ -2332,6 +2429,7 @@ static FORCE_INLINE void* tDecodeSMqRebVgReq(const void* buf, SMqRebVgReq* pReq)
buf = taosDecodeFixedI64(buf, &pReq->newConsumerId);
buf = taosDecodeStringTo(buf, pReq->subKey);
buf = taosDecodeFixedI8(buf, &pReq->subType);
buf = taosDecodeFixedI8(buf, &pReq->withMeta);
if (pReq->subType == TOPIC_SUB_TYPE__COLUMN) {
buf = taosDecodeString(buf, &pReq->qmsg);
} else if (pReq->subType == TOPIC_SUB_TYPE__TABLE) {
......@@ -2395,6 +2493,8 @@ typedef struct {
int64_t interval;
int64_t offset;
int64_t sliding;
int64_t maxDelay;
int64_t watermark;
int32_t exprLen; // strlen + 1
int32_t tagsFilterLen; // strlen + 1
int32_t sqlLen; // strlen + 1
......@@ -2604,6 +2704,7 @@ typedef struct {
SMsgHead head;
char subKey[TSDB_SUBSCRIBE_KEY_LEN];
int8_t withTbName;
int8_t useSnapshot;
int32_t epoch;
uint64_t reqId;
int64_t consumerId;
......@@ -2676,6 +2777,34 @@ static FORCE_INLINE void tDeleteSMqSubTopicEp(SMqSubTopicEp* pSubTopicEp) {
taosArrayDestroy(pSubTopicEp->vgs);
}
typedef struct {
SMqRspHead head;
int64_t reqOffset;
int64_t rspOffset;
int16_t resMsgType;
int32_t metaRspLen;
void* metaRsp;
} SMqMetaRsp;
static FORCE_INLINE int32_t tEncodeSMqMetaRsp(void** buf, const SMqMetaRsp* pRsp) {
int32_t tlen = 0;
tlen += taosEncodeFixedI64(buf, pRsp->reqOffset);
tlen += taosEncodeFixedI64(buf, pRsp->rspOffset);
tlen += taosEncodeFixedI16(buf, pRsp->resMsgType);
tlen += taosEncodeFixedI32(buf, pRsp->metaRspLen);
tlen += taosEncodeBinary(buf, pRsp->metaRsp, pRsp->metaRspLen);
return tlen;
}
static FORCE_INLINE void* tDecodeSMqMetaRsp(const void* buf, SMqMetaRsp* pRsp) {
buf = taosDecodeFixedI64(buf, &pRsp->reqOffset);
buf = taosDecodeFixedI64(buf, &pRsp->rspOffset);
buf = taosDecodeFixedI16(buf, &pRsp->resMsgType);
buf = taosDecodeFixedI32(buf, &pRsp->metaRspLen);
buf = taosDecodeBinary(buf, &pRsp->metaRsp, pRsp->metaRspLen);
return (void*)buf;
}
typedef struct {
SMqRspHead head;
int64_t reqOffset;
......
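The encode/decode pair above follows the usual two-pass taosEncode* convention. Below is a hedged sketch of a round trip; taosMemoryMalloc/taosMemoryFree are assumed from the OS memory utilities, and ownership of the decoded metaRsp buffer is left to the decoder's convention.

/* Hedged sketch: size, serialize, and deserialize an SMqMetaRsp. */
static int32_t metaRspRoundTrip(const SMqMetaRsp* pSrc, SMqMetaRsp* pDst) {
  int32_t tlen = tEncodeSMqMetaRsp(NULL, pSrc);  /* pass 1: compute length only */
  void*   buf  = taosMemoryMalloc(tlen);
  if (buf == NULL) return -1;
  void* abuf = buf;
  tEncodeSMqMetaRsp(&abuf, pSrc);                /* pass 2: serialize into buf */
  tDecodeSMqMetaRsp(buf, pDst);                  /* rebuild the struct from buf */
  taosMemoryFree(buf);
  return 0;
}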
......@@ -34,7 +34,6 @@ typedef enum {
WRITE_QUEUE,
APPLY_QUEUE,
SYNC_QUEUE,
MERGE_QUEUE,
QUEUE_MAX,
} EQueueType;
......
......@@ -81,6 +81,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_DND_SERVER_STATUS, "server-status", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_DND_NET_TEST, "net-test", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_DND_CONFIG_DNODE, "config-dnode", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_DND_SYSTABLE_RETRIEVE, "dnode-retrieve", NULL, NULL)
TD_NEW_MSG_SEG(TDMT_MND_MSG)
TD_DEF_MSG_TYPE(TDMT_MND_CONNECT, "connect", NULL, NULL)
......@@ -101,6 +102,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_MND_ALTER_QNODE, "alter-qnode", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_DROP_QNODE, "drop-qnode", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_QNODE_LIST, "qnode-list", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_DNODE_LIST, "dnode-list", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_CREATE_SNODE, "create-snode", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_ALTER_SNODE, "alter-snode", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_DROP_SNODE, "drop-snode", NULL, NULL)
......@@ -131,6 +133,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_MND_DROP_INDEX, "drop-index", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_GET_INDEX, "get-index", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_GET_TABLE_INDEX, "get-table-index", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_TABLE_CFG, "table-cfg", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_CREATE_TOPIC, "create-topic", SMCreateTopicReq, SMCreateTopicRsp)
TD_DEF_MSG_TYPE(TDMT_MND_ALTER_TOPIC, "alter-topic", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_DROP_TOPIC, "drop-topic", NULL, NULL)
......@@ -144,13 +147,14 @@ enum {
TD_DEF_MSG_TYPE(TDMT_MND_MQ_TIMER, "mq-tmr", SMTimerReq, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_TELEM_TIMER, "telem-tmr", SMTimerReq, SMTimerReq)
TD_DEF_MSG_TYPE(TDMT_MND_TRANS_TIMER, "trans-tmr", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_TTL_TIMER, "ttl-tmr", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_KILL_TRANS, "kill-trans", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_KILL_QUERY, "kill-query", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_KILL_CONN, "kill-conn", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_HEARTBEAT, "heartbeat", SClientHbBatchReq, SClientHbBatchRsp)
TD_DEF_MSG_TYPE(TDMT_MND_STATUS, "status", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_SHOW, "show", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_SYSTABLE_RETRIEVE, "retrieve", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_SYSTABLE_RETRIEVE, "mnd-retrieve", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_GRANT, "grant", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_AUTH, "auth", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_APPLY_MSG, "mnode-apply", NULL, NULL)
......@@ -158,6 +162,8 @@ enum {
TD_DEF_MSG_TYPE(TDMT_MND_MERGE_VGROUP, "merge-vgroup", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_REDISTRIBUTE_VGROUP, "redistribute-vgroup", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_SPLIT_VGROUP, "split-vgroup", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_SHOW_VARIABLES, "show-variables", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_SERVER_VERSION, "server-version", NULL, NULL)
TD_NEW_MSG_SEG(TDMT_VND_MSG)
TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT, "submit", SSubmitReq, SSubmitRsp)
......@@ -171,6 +177,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_VND_UPDATE_TAG_VAL, "update-tag-val", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_TABLE_META, "vnode-table-meta", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_TABLES_META, "vnode-tables-meta", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_TABLE_CFG, "vnode-table-cfg", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_CREATE_STB, "vnode-create-stb", SVCreateStbReq, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_ALTER_STB, "vnode-alter-stb", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_DROP_STB, "vnode-drop-stb", SVDropStbReq, NULL)
......@@ -187,7 +194,6 @@ enum {
TD_DEF_MSG_TYPE(TDMT_VND_CONSUME, "vnode-consume", SMqPollReq, SMqDataBlkRsp)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TRIGGER, "vnode-stream-trigger", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_DISPATCH_WRITE, "vnode-stream-task-dispatch-write", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TASK_DROP, "vnode-stream-task-drop", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_CREATE_SMA, "vnode-create-sma", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_CANCEL_SMA, "vnode-cancel-sma", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_DROP_SMA, "vnode-drop-sma", NULL, NULL)
......@@ -199,14 +205,14 @@ enum {
TD_DEF_MSG_TYPE(TDMT_VND_ALTER_HASHRANGE, "alter-hashrange", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_COMPACT, "compact", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_DROP_TTL_TABLE, "drop-ttl-stb", NULL, NULL)
TD_NEW_MSG_SEG(TDMT_QND_MSG)
//shared by snode and vnode
TD_NEW_MSG_SEG(TDMT_STREAM_MSG)
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_DEPLOY, "stream-task-deploy", SStreamTaskDeployReq, SStreamTaskDeployRsp)
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_DROP, "stream-task-drop", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_RUN, "stream-task-run", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_DISPATCH, "stream-task-dispatch", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_RECOVER, "stream-task-recover", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_RETRIEVE, "stream-retrieve", NULL, NULL)
TD_NEW_MSG_SEG(TDMT_SCH_MSG)
TD_DEF_MSG_TYPE(TDMT_SCH_LINK_BROKEN, "link-broken", NULL, NULL)
......@@ -236,6 +242,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_SYNC_COMMON_RESPONSE, "sync-common-response", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_APPLY_MSG, "sync-apply-msg", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_CONFIG_CHANGE, "sync-config-change", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_CONFIG_CHANGE_FINISH, "sync-config-change-finish", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_SNAPSHOT_SEND, "sync-snapshot-send", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_SNAPSHOT_RSP, "sync-snapshot-rsp", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_LEADER_TRANSFER, "sync-leader-transfer", NULL, NULL)
......
......@@ -57,12 +57,16 @@ void tNameAssign(SName* dst, const SName* src);
int32_t tNameSetDbName(SName* dst, int32_t acctId, const char* dbName, size_t nameLen);
int32_t tNameAddTbName(SName* dst, const char* tbName, size_t nameLen);
int32_t tNameFromString(SName* dst, const char* str, uint32_t type);
int32_t tNameSetAcctId(SName* dst, int32_t acctId);
bool tNameDBNameEqual(SName* left, SName* right);
bool tNameTbNameEqual(SName* left, SName* right);
typedef struct {
// input
SArray* tags; // element is SSmlKv
......
......@@ -49,214 +49,218 @@
#define TK_CONNS 31
#define TK_STATE 32
#define TK_USER 33
#define TK_PRIVILEGE 34
#define TK_DROP 35
#define TK_GRANT 36
#define TK_ON 37
#define TK_TO 38
#define TK_REVOKE 39
#define TK_FROM 40
#define TK_NK_COMMA 41
#define TK_READ 42
#define TK_WRITE 43
#define TK_NK_DOT 44
#define TK_DNODE 45
#define TK_PORT 46
#define TK_NK_INTEGER 47
#define TK_DNODES 48
#define TK_NK_IPTOKEN 49
#define TK_LOCAL 50
#define TK_QNODE 51
#define TK_BNODE 52
#define TK_SNODE 53
#define TK_MNODE 54
#define TK_DATABASE 55
#define TK_USE 56
#define TK_IF 57
#define TK_NOT 58
#define TK_EXISTS 59
#define TK_BUFFER 60
#define TK_CACHELAST 61
#define TK_COMP 62
#define TK_DURATION 63
#define TK_NK_VARIABLE 64
#define TK_FSYNC 65
#define TK_MAXROWS 66
#define TK_MINROWS 67
#define TK_KEEP 68
#define TK_PAGES 69
#define TK_PAGESIZE 70
#define TK_PRECISION 71
#define TK_REPLICA 72
#define TK_STRICT 73
#define TK_WAL 74
#define TK_VGROUPS 75
#define TK_SINGLE_STABLE 76
#define TK_RETENTIONS 77
#define TK_SCHEMALESS 78
#define TK_NK_COLON 79
#define TK_TABLE 80
#define TK_NK_LP 81
#define TK_NK_RP 82
#define TK_STABLE 83
#define TK_ADD 84
#define TK_COLUMN 85
#define TK_MODIFY 86
#define TK_RENAME 87
#define TK_TAG 88
#define TK_SET 89
#define TK_NK_EQ 90
#define TK_USING 91
#define TK_TAGS 92
#define TK_COMMENT 93
#define TK_BOOL 94
#define TK_TINYINT 95
#define TK_SMALLINT 96
#define TK_INT 97
#define TK_INTEGER 98
#define TK_BIGINT 99
#define TK_FLOAT 100
#define TK_DOUBLE 101
#define TK_BINARY 102
#define TK_TIMESTAMP 103
#define TK_NCHAR 104
#define TK_UNSIGNED 105
#define TK_JSON 106
#define TK_VARCHAR 107
#define TK_MEDIUMBLOB 108
#define TK_BLOB 109
#define TK_VARBINARY 110
#define TK_DECIMAL 111
#define TK_MAX_DELAY 112
#define TK_WATERMARK 113
#define TK_ROLLUP 114
#define TK_TTL 115
#define TK_SMA 116
#define TK_FIRST 117
#define TK_LAST 118
#define TK_SHOW 119
#define TK_DATABASES 120
#define TK_TABLES 121
#define TK_STABLES 122
#define TK_MNODES 123
#define TK_MODULES 124
#define TK_QNODES 125
#define TK_FUNCTIONS 126
#define TK_INDEXES 127
#define TK_ACCOUNTS 128
#define TK_APPS 129
#define TK_CONNECTIONS 130
#define TK_LICENCE 131
#define TK_GRANTS 132
#define TK_QUERIES 133
#define TK_SCORES 134
#define TK_TOPICS 135
#define TK_VARIABLES 136
#define TK_BNODES 137
#define TK_SNODES 138
#define TK_CLUSTER 139
#define TK_TRANSACTIONS 140
#define TK_DISTRIBUTED 141
#define TK_LIKE 142
#define TK_INDEX 143
#define TK_FULLTEXT 144
#define TK_FUNCTION 145
#define TK_INTERVAL 146
#define TK_TOPIC 147
#define TK_AS 148
#define TK_CONSUMER 149
#define TK_GROUP 150
#define TK_DESC 151
#define TK_DESCRIBE 152
#define TK_RESET 153
#define TK_QUERY 154
#define TK_CACHE 155
#define TK_EXPLAIN 156
#define TK_ANALYZE 157
#define TK_VERBOSE 158
#define TK_NK_BOOL 159
#define TK_RATIO 160
#define TK_NK_FLOAT 161
#define TK_COMPACT 162
#define TK_VNODES 163
#define TK_IN 164
#define TK_OUTPUTTYPE 165
#define TK_AGGREGATE 166
#define TK_BUFSIZE 167
#define TK_STREAM 168
#define TK_INTO 169
#define TK_TRIGGER 170
#define TK_AT_ONCE 171
#define TK_WINDOW_CLOSE 172
#define TK_KILL 173
#define TK_CONNECTION 174
#define TK_TRANSACTION 175
#define TK_BALANCE 176
#define TK_VGROUP 177
#define TK_MERGE 178
#define TK_REDISTRIBUTE 179
#define TK_SPLIT 180
#define TK_SYNCDB 181
#define TK_DELETE 182
#define TK_NULL 183
#define TK_NK_QUESTION 184
#define TK_NK_ARROW 185
#define TK_ROWTS 186
#define TK_TBNAME 187
#define TK_QSTARTTS 188
#define TK_QENDTS 189
#define TK_WSTARTTS 190
#define TK_WENDTS 191
#define TK_WDURATION 192
#define TK_CAST 193
#define TK_NOW 194
#define TK_TODAY 195
#define TK_TIMEZONE 196
#define TK_COUNT 197
#define TK_LAST_ROW 198
#define TK_BETWEEN 199
#define TK_IS 200
#define TK_NK_LT 201
#define TK_NK_GT 202
#define TK_NK_LE 203
#define TK_NK_GE 204
#define TK_NK_NE 205
#define TK_MATCH 206
#define TK_NMATCH 207
#define TK_CONTAINS 208
#define TK_JOIN 209
#define TK_INNER 210
#define TK_SELECT 211
#define TK_DISTINCT 212
#define TK_WHERE 213
#define TK_PARTITION 214
#define TK_BY 215
#define TK_SESSION 216
#define TK_STATE_WINDOW 217
#define TK_SLIDING 218
#define TK_FILL 219
#define TK_VALUE 220
#define TK_NONE 221
#define TK_PREV 222
#define TK_LINEAR 223
#define TK_NEXT 224
#define TK_HAVING 225
#define TK_RANGE 226
#define TK_EVERY 227
#define TK_ORDER 228
#define TK_SLIMIT 229
#define TK_SOFFSET 230
#define TK_LIMIT 231
#define TK_OFFSET 232
#define TK_ASC 233
#define TK_NULLS 234
#define TK_ID 235
#define TK_NK_BITNOT 236
#define TK_INSERT 237
#define TK_VALUES 238
#define TK_IMPORT 239
#define TK_NK_SEMI 240
#define TK_FILE 241
#define TK_ENABLE 34
#define TK_NK_INTEGER 35
#define TK_SYSINFO 36
#define TK_DROP 37
#define TK_GRANT 38
#define TK_ON 39
#define TK_TO 40
#define TK_REVOKE 41
#define TK_FROM 42
#define TK_NK_COMMA 43
#define TK_READ 44
#define TK_WRITE 45
#define TK_NK_DOT 46
#define TK_DNODE 47
#define TK_PORT 48
#define TK_DNODES 49
#define TK_NK_IPTOKEN 50
#define TK_LOCAL 51
#define TK_QNODE 52
#define TK_BNODE 53
#define TK_SNODE 54
#define TK_MNODE 55
#define TK_DATABASE 56
#define TK_USE 57
#define TK_IF 58
#define TK_NOT 59
#define TK_EXISTS 60
#define TK_BUFFER 61
#define TK_CACHELAST 62
#define TK_COMP 63
#define TK_DURATION 64
#define TK_NK_VARIABLE 65
#define TK_FSYNC 66
#define TK_MAXROWS 67
#define TK_MINROWS 68
#define TK_KEEP 69
#define TK_PAGES 70
#define TK_PAGESIZE 71
#define TK_PRECISION 72
#define TK_REPLICA 73
#define TK_STRICT 74
#define TK_WAL 75
#define TK_VGROUPS 76
#define TK_SINGLE_STABLE 77
#define TK_RETENTIONS 78
#define TK_SCHEMALESS 79
#define TK_NK_COLON 80
#define TK_TABLE 81
#define TK_NK_LP 82
#define TK_NK_RP 83
#define TK_STABLE 84
#define TK_ADD 85
#define TK_COLUMN 86
#define TK_MODIFY 87
#define TK_RENAME 88
#define TK_TAG 89
#define TK_SET 90
#define TK_NK_EQ 91
#define TK_USING 92
#define TK_TAGS 93
#define TK_COMMENT 94
#define TK_BOOL 95
#define TK_TINYINT 96
#define TK_SMALLINT 97
#define TK_INT 98
#define TK_INTEGER 99
#define TK_BIGINT 100
#define TK_FLOAT 101
#define TK_DOUBLE 102
#define TK_BINARY 103
#define TK_TIMESTAMP 104
#define TK_NCHAR 105
#define TK_UNSIGNED 106
#define TK_JSON 107
#define TK_VARCHAR 108
#define TK_MEDIUMBLOB 109
#define TK_BLOB 110
#define TK_VARBINARY 111
#define TK_DECIMAL 112
#define TK_MAX_DELAY 113
#define TK_WATERMARK 114
#define TK_ROLLUP 115
#define TK_TTL 116
#define TK_SMA 117
#define TK_FIRST 118
#define TK_LAST 119
#define TK_SHOW 120
#define TK_DATABASES 121
#define TK_TABLES 122
#define TK_STABLES 123
#define TK_MNODES 124
#define TK_MODULES 125
#define TK_QNODES 126
#define TK_FUNCTIONS 127
#define TK_INDEXES 128
#define TK_ACCOUNTS 129
#define TK_APPS 130
#define TK_CONNECTIONS 131
#define TK_LICENCE 132
#define TK_GRANTS 133
#define TK_QUERIES 134
#define TK_SCORES 135
#define TK_TOPICS 136
#define TK_VARIABLES 137
#define TK_BNODES 138
#define TK_SNODES 139
#define TK_CLUSTER 140
#define TK_TRANSACTIONS 141
#define TK_DISTRIBUTED 142
#define TK_CONSUMERS 143
#define TK_SUBSCRIPTIONS 144
#define TK_LIKE 145
#define TK_INDEX 146
#define TK_FUNCTION 147
#define TK_INTERVAL 148
#define TK_TOPIC 149
#define TK_AS 150
#define TK_WITH 151
#define TK_META 152
#define TK_CONSUMER 153
#define TK_GROUP 154
#define TK_DESC 155
#define TK_DESCRIBE 156
#define TK_RESET 157
#define TK_QUERY 158
#define TK_CACHE 159
#define TK_EXPLAIN 160
#define TK_ANALYZE 161
#define TK_VERBOSE 162
#define TK_NK_BOOL 163
#define TK_RATIO 164
#define TK_NK_FLOAT 165
#define TK_COMPACT 166
#define TK_VNODES 167
#define TK_IN 168
#define TK_OUTPUTTYPE 169
#define TK_AGGREGATE 170
#define TK_BUFSIZE 171
#define TK_STREAM 172
#define TK_INTO 173
#define TK_TRIGGER 174
#define TK_AT_ONCE 175
#define TK_WINDOW_CLOSE 176
#define TK_KILL 177
#define TK_CONNECTION 178
#define TK_TRANSACTION 179
#define TK_BALANCE 180
#define TK_VGROUP 181
#define TK_MERGE 182
#define TK_REDISTRIBUTE 183
#define TK_SPLIT 184
#define TK_SYNCDB 185
#define TK_DELETE 186
#define TK_NULL 187
#define TK_NK_QUESTION 188
#define TK_NK_ARROW 189
#define TK_ROWTS 190
#define TK_TBNAME 191
#define TK_QSTARTTS 192
#define TK_QENDTS 193
#define TK_WSTARTTS 194
#define TK_WENDTS 195
#define TK_WDURATION 196
#define TK_CAST 197
#define TK_NOW 198
#define TK_TODAY 199
#define TK_TIMEZONE 200
#define TK_COUNT 201
#define TK_LAST_ROW 202
#define TK_BETWEEN 203
#define TK_IS 204
#define TK_NK_LT 205
#define TK_NK_GT 206
#define TK_NK_LE 207
#define TK_NK_GE 208
#define TK_NK_NE 209
#define TK_MATCH 210
#define TK_NMATCH 211
#define TK_CONTAINS 212
#define TK_JOIN 213
#define TK_INNER 214
#define TK_SELECT 215
#define TK_DISTINCT 216
#define TK_WHERE 217
#define TK_PARTITION 218
#define TK_BY 219
#define TK_SESSION 220
#define TK_STATE_WINDOW 221
#define TK_SLIDING 222
#define TK_FILL 223
#define TK_VALUE 224
#define TK_NONE 225
#define TK_PREV 226
#define TK_LINEAR 227
#define TK_NEXT 228
#define TK_HAVING 229
#define TK_RANGE 230
#define TK_EVERY 231
#define TK_ORDER 232
#define TK_SLIMIT 233
#define TK_SOFFSET 234
#define TK_LIMIT 235
#define TK_OFFSET 236
#define TK_ASC 237
#define TK_NULLS 238
#define TK_ID 239
#define TK_NK_BITNOT 240
#define TK_INSERT 241
#define TK_VALUES 242
#define TK_IMPORT 243
#define TK_NK_SEMI 244
#define TK_FILE 245
#define TK_NK_SPACE 300
#define TK_NK_COMMENT 301
......
......@@ -73,10 +73,12 @@ void mndStop(SMnode *pMnode);
* @param pMnode The mnode object.
* @param pCluster
* @param pVgroup
* @param pStbInfo
* @param pGrant
* @return int32_t 0 for success, -1 for failure.
*/
int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pCluster, SMonVgroupInfo *pVgroup, SMonGrantInfo *pGrant);
int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgroupInfo *pVgroupInfo,
SMonStbInfo *pStbInfo, SMonGrantInfo *pGrantInfo);
/**
* @brief Get mnode loads for status msg.
......@@ -95,8 +97,8 @@ int32_t mndGetLoad(SMnode *pMnode, SMnodeLoad *pLoad);
*/
int32_t mndProcessRpcMsg(SRpcMsg *pMsg);
int32_t mndProcessSyncMsg(SRpcMsg *pMsg);
int32_t mndPreProcessMsg(SRpcMsg *pMsg);
void mndAbortPreprocessMsg(SRpcMsg *pMsg);
int32_t mndPreProcessQueryMsg(SRpcMsg *pMsg);
void mndPostProcessQueryMsg(SRpcMsg *pMsg);
/**
* @brief Generate machine code
......
......@@ -16,8 +16,8 @@
#ifndef _TD_SNODE_H_
#define _TD_SNODE_H_
#include "tmsgcb.h"
#include "tmsg.h"
#include "tmsgcb.h"
#include "trpc.h"
#ifdef __cplusplus
......@@ -68,8 +68,8 @@ int32_t sndGetLoad(SSnode *pSnode, SSnodeLoad *pLoad);
* @param pMsg The request message
* @param pRsp The response message
*/
void sndProcessUMsg(SSnode *pSnode, SRpcMsg *pMsg);
void sndProcessSMsg(SSnode *pSnode, SRpcMsg *pMsg);
int32_t sndProcessUMsg(SSnode *pSnode, SRpcMsg *pMsg);
int32_t sndProcessSMsg(SSnode *pSnode, SRpcMsg *pMsg);
#ifdef __cplusplus
}
......
......@@ -68,7 +68,10 @@ typedef struct SCatalogReq {
SArray* pIndex; // element is index name
SArray* pUser; // element is SUserAuthInfo
SArray* pTableIndex; // element is SNAME
SArray* pTableCfg; // element is SNAME
bool qNodeRequired; // valid qnode
bool dNodeRequired; // valid dnode
bool svrVerRequired;
bool forceUpdate;
} SCatalogReq;
......@@ -78,16 +81,19 @@ typedef struct SMetaRes {
} SMetaRes;
typedef struct SMetaData {
SArray* pDbVgroup; // pRes = SArray<SVgroupInfo>*
SArray* pDbCfg; // pRes = SDbCfgInfo*
SArray* pDbInfo; // pRes = SDbInfo*
SArray* pTableMeta; // pRes = STableMeta*
SArray* pTableHash; // pRes = SVgroupInfo*
SArray* pTableIndex; // pRes = SArray<STableIndexInfo>*
SArray* pUdfList; // pRes = SFuncInfo*
SArray* pIndex; // pRes = SIndexInfo*
SArray* pUser; // pRes = bool*
SArray* pQnodeList; // pRes = SArray<SQueryNodeLoad>*
SArray* pDbVgroup; // pRes = SArray<SVgroupInfo>*
SArray* pDbCfg; // pRes = SDbCfgInfo*
SArray* pDbInfo; // pRes = SDbInfo*
SArray* pTableMeta; // pRes = STableMeta*
SArray* pTableHash; // pRes = SVgroupInfo*
SArray* pTableIndex; // pRes = SArray<STableIndexInfo>*
SArray* pUdfList; // pRes = SFuncInfo*
SArray* pIndex; // pRes = SIndexInfo*
SArray* pUser; // pRes = bool*
SArray* pQnodeList; // pRes = SArray<SQueryNodeLoad>*
SArray* pTableCfg; // pRes = STableCfg*
SArray* pDnodeList; // pRes = SArray<SEpSet>*
SMetaRes* pSvrVer; // pRes = char*
} SMetaData;
typedef struct SCatalogCfg {
......@@ -264,10 +270,12 @@ int32_t catalogGetTableHashVgroup(SCatalog* pCatalog, SRequestConnInfo* pConn, c
*/
int32_t catalogGetAllMeta(SCatalog* pCatalog, SRequestConnInfo* pConn, const SCatalogReq* pReq, SMetaData* pRsp);
int32_t catalogAsyncGetAllMeta(SCatalog* pCtg, SRequestConnInfo* pConn, uint64_t reqId, const SCatalogReq* pReq, catalogCallback fp, void* param, int64_t* jobId);
int32_t catalogAsyncGetAllMeta(SCatalog* pCtg, SRequestConnInfo* pConn, const SCatalogReq* pReq, catalogCallback fp, void* param, int64_t* jobId);
int32_t catalogGetQnodeList(SCatalog* pCatalog, SRequestConnInfo* pConn, SArray* pQnodeList);
int32_t catalogGetDnodeList(SCatalog* pCatalog, SRequestConnInfo* pConn, SArray** pDnodeList);
int32_t catalogGetExpiredSTables(SCatalog* pCatalog, SSTableVersion **stables, uint32_t *num);
int32_t catalogGetExpiredDBs(SCatalog* pCatalog, SDbVgVersion** dbs, uint32_t* num);
......@@ -280,6 +288,8 @@ int32_t catalogGetIndexMeta(SCatalog* pCtg, SRequestConnInfo* pConn, const char*
int32_t catalogGetTableIndex(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName, SArray** pRes);
int32_t catalogRefreshGetTableCfg(SCatalog* pCtg, SRequestConnInfo *pConn, const SName* pTableName, STableCfg** pCfg);
int32_t catalogUpdateTableIndex(SCatalog* pCtg, STableIndexRsp *pRsp);
int32_t catalogGetUdfInfo(SCatalog* pCtg, SRequestConnInfo* pConn, const char* funcName, SFuncInfo* pInfo);
......@@ -290,6 +300,8 @@ int32_t catalogUpdateUserAuthInfo(SCatalog* pCtg, SGetUserAuthRsp* pAuth);
int32_t catalogUpdateVgEpSet(SCatalog* pCtg, const char* dbFName, int32_t vgId, SEpSet *epSet);
int32_t catalogGetServerVersion(SCatalog* pCtg, SRequestConnInfo *pConn, char** pVersion);
int32_t ctgdLaunchAsyncCall(SCatalog* pCtg, SRequestConnInfo* pConn, uint64_t reqId, bool forceUpdate);
int32_t catalogClearCache(void);
......
......@@ -33,7 +33,7 @@ struct SDataSink;
struct SSDataBlock;
typedef struct SDeleterRes {
uint64_t uid;
uint64_t suid;
SArray* uidList;
int64_t skey;
int64_t ekey;
......@@ -41,7 +41,8 @@ typedef struct SDeleterRes {
} SDeleterRes;
typedef struct SDeleterParam {
SArray* pUidList;
uint64_t suid;
SArray* pUidList;
} SDeleterParam;
typedef struct SDataSinkStat {
......
......@@ -36,11 +36,13 @@ typedef struct SReadHandle {
void* vnode;
void* mnd;
SMsgCb* pMsgCb;
// int8_t initTsdbReader;
} SReadHandle;
enum {
STREAM_DATA_TYPE_SUBMIT_BLOCK = 1,
STREAM_DATA_TYPE_SSDATA_BLOCK = 2,
STREAM_DATA_TYPE_FROM_SNAPSHOT = 3,
};
typedef enum {
......@@ -56,6 +58,13 @@ typedef enum {
*/
qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, void* streamReadHandle);
/**
* Switch the stream scan to snapshot mode
* @param tinfo
* @return
*/
int32_t qStreamScanSnapshot(qTaskInfo_t tinfo);
/**
* Set the input data block for the stream scan.
* @param tinfo
......
......@@ -67,7 +67,7 @@ typedef struct SResultRowEntryInfo {
bool initialized:1; // output buffer has been initialized
bool complete:1; // query has completed
uint8_t isNullRes:6; // the result is null
uint8_t numOfRes; // num of output result in current buffer
uint16_t numOfRes; // num of output result in current buffer
} SResultRowEntryInfo;
// determine the real data needed to calculate the result
......@@ -172,23 +172,6 @@ typedef struct tExprNode {
void tExprTreeDestroy(tExprNode *pNode, void (*fp)(void *));
typedef struct SAggFunctionInfo {
char name[FUNCTIONS_NAME_MAX_LENGTH];
int8_t type; // Scalar function or aggregation function
uint32_t functionId; // Function Id
int8_t sFunctionId; // Transfer function for super table query
uint16_t status;
bool (*init)(SqlFunctionCtx *pCtx, struct SResultRowEntryInfo* pResultCellInfo); // setup the execute environment
void (*addInput)(SqlFunctionCtx *pCtx);
// finalizer must be called after all exec has been executed to generated final result.
void (*finalize)(SqlFunctionCtx *pCtx);
void (*combine)(SqlFunctionCtx *pCtx);
int32_t (*dataReqFunc)(SqlFunctionCtx *pCtx, STimeWindow* w, int32_t colId);
} SAggFunctionInfo;
struct SScalarParam {
SColumnInfoData *columnData;
SHashObj *pHashFilter;
......
......@@ -123,6 +123,8 @@ typedef enum EFunctionType {
FUNCTION_TYPE_SELECT_VALUE,
FUNCTION_TYPE_BLOCK_DIST, // block distribution aggregate function
FUNCTION_TYPE_BLOCK_DIST_INFO, // block distribution pseudo column function
FUNCTION_TYPE_TO_COLUMN,
FUNCTION_TYPE_GROUP_KEY,
// distributed splitting functions
FUNCTION_TYPE_APERCENTILE_PARTIAL = 4000,
......@@ -190,6 +192,7 @@ bool fmIsForbidWindowFunc(int32_t funcId);
bool fmIsForbidGroupByFunc(int32_t funcId);
bool fmIsIntervalInterpoFunc(int32_t funcId);
bool fmIsInterpFunc(int32_t funcId);
bool fmIsLastRowFunc(int32_t funcId);
int32_t fmGetDistMethod(const SFunctionNode* pFunc, SFunctionNode** pPartialFunc, SFunctionNode** pMergeFunc);
......
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_TAOSUDF_H
#define TDENGINE_TAOSUDF_H
#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <taos.h>
#include <taoserror.h>
#ifdef __cplusplus
extern "C" {
#endif
#if defined(__GNUC__)
#define FORCE_INLINE inline __attribute__((always_inline))
#else
#define FORCE_INLINE
#endif
typedef struct SUdfColumnMeta {
int16_t type;
int32_t bytes;
uint8_t precision;
uint8_t scale;
} SUdfColumnMeta;
typedef struct SUdfColumnData {
int32_t numOfRows;
int32_t rowsAlloc;
union {
struct {
int32_t nullBitmapLen;
char *nullBitmap;
int32_t dataLen;
char *data;
} fixLenCol;
struct {
int32_t varOffsetsLen;
int32_t *varOffsets;
int32_t payloadLen;
char *payload;
int32_t payloadAllocLen;
} varLenCol;
};
} SUdfColumnData;
typedef struct SUdfColumn {
SUdfColumnMeta colMeta;
bool hasNull;
SUdfColumnData colData;
} SUdfColumn;
typedef struct SUdfDataBlock {
int32_t numOfRows;
int32_t numOfCols;
SUdfColumn **udfCols;
} SUdfDataBlock;
typedef struct SUdfInterBuf {
int32_t bufLen;
char* buf;
int8_t numOfResult; //zero or one
} SUdfInterBuf;
typedef void *UdfcFuncHandle;
// dynamic lib init and destroy
typedef int32_t (*TUdfInitFunc)();
typedef int32_t (*TUdfDestroyFunc)();
#define UDF_MEMORY_EXP_GROWTH 1.5
#define NBIT (3u)
#define BitPos(_n) ((_n) & ((1 << NBIT) - 1))
#define BMCharPos(bm_, r_) ((bm_)[(r_) >> NBIT])
#define BitmapLen(_n) (((_n) + ((1 << NBIT) - 1)) >> NBIT)
#define udfColDataIsNull_var(pColumn, row) ((pColumn->colData.varLenCol.varOffsets)[row] == -1)
#define udfColDataIsNull_f(pColumn, row) ((BMCharPos(pColumn->colData.fixLenCol.nullBitmap, row) & (1u << (7u - BitPos(row)))) == (1u << (7u - BitPos(row))))
#define udfColDataSetNull_f(pColumn, row) \
do { \
BMCharPos(pColumn->colData.fixLenCol.nullBitmap, row) |= (1u << (7u - BitPos(row))); \
} while (0)
#define udfColDataSetNotNull_f(pColumn, r_) \
do { \
BMCharPos(pColumn->colData.fixLenCol.nullBitmap, r_) &= ~(1u << (7u - BitPos(r_))); \
} while (0)
#define udfColDataSetNull_var(pColumn, row) ((pColumn->colData.varLenCol.varOffsets)[row] = -1)
typedef uint16_t VarDataLenT; // maxVarDataLen: 32767
#define VARSTR_HEADER_SIZE sizeof(VarDataLenT)
#define varDataLen(v) ((VarDataLenT *)(v))[0]
#define varDataVal(v) ((char *)(v) + VARSTR_HEADER_SIZE)
#define varDataTLen(v) (sizeof(VarDataLenT) + varDataLen(v))
#define varDataCopy(dst, v) memcpy((dst), (void *)(v), varDataTLen(v))
#define varDataLenByData(v) (*(VarDataLenT *)(((char *)(v)) - VARSTR_HEADER_SIZE))
#define varDataSetLen(v, _len) (((VarDataLenT *)(v))[0] = (VarDataLenT)(_len))
#define IS_VAR_DATA_TYPE(t) \
(((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON))
#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR))
static FORCE_INLINE char* udfColDataGetData(const SUdfColumn* pColumn, int32_t row) {
if (IS_VAR_DATA_TYPE(pColumn->colMeta.type)) {
return pColumn->colData.varLenCol.payload + pColumn->colData.varLenCol.varOffsets[row];
} else {
return pColumn->colData.fixLenCol.data + pColumn->colMeta.bytes * row;
}
}
static FORCE_INLINE bool udfColDataIsNull(const SUdfColumn* pColumn, int32_t row) {
if (IS_VAR_DATA_TYPE(pColumn->colMeta.type)) {
if (pColumn->colMeta.type == TSDB_DATA_TYPE_JSON) {
if (udfColDataIsNull_var(pColumn, row)) {
return true;
}
char* data = udfColDataGetData(pColumn, row);
return (*data == TSDB_DATA_TYPE_NULL);
} else {
return udfColDataIsNull_var(pColumn, row);
}
} else {
return udfColDataIsNull_f(pColumn, row);
}
}
static FORCE_INLINE int32_t udfColEnsureCapacity(SUdfColumn* pColumn, int32_t newCapacity) {
SUdfColumnMeta *meta = &pColumn->colMeta;
SUdfColumnData *data = &pColumn->colData;
if (newCapacity== 0 || newCapacity <= data->rowsAlloc) {
return TSDB_CODE_SUCCESS;
}
int allocCapacity = (data->rowsAlloc< 8) ? 8 : data->rowsAlloc;
while (allocCapacity < newCapacity) {
allocCapacity *= UDF_MEMORY_EXP_GROWTH;
}
if (IS_VAR_DATA_TYPE(meta->type)) {
char* tmp = (char*)realloc(data->varLenCol.varOffsets, sizeof(int32_t) * allocCapacity);
if (tmp == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
data->varLenCol.varOffsets = (int32_t*)tmp;
data->varLenCol.varOffsetsLen = sizeof(int32_t) * allocCapacity;
// for payload, add data in udfColDataAppend
} else {
char* tmp = (char*)realloc(data->fixLenCol.nullBitmap, BitmapLen(allocCapacity));
if (tmp == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
data->fixLenCol.nullBitmap = tmp;
data->fixLenCol.nullBitmapLen = BitmapLen(allocCapacity);
if (meta->type == TSDB_DATA_TYPE_NULL) {
return TSDB_CODE_SUCCESS;
}
tmp = (char*)realloc(data->fixLenCol.data, allocCapacity* meta->bytes);
if (tmp == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
data->fixLenCol.data = tmp;
data->fixLenCol.dataLen = allocCapacity* meta->bytes;
}
data->rowsAlloc = allocCapacity;
return TSDB_CODE_SUCCESS;
}
static FORCE_INLINE void udfColDataSetNull(SUdfColumn* pColumn, int32_t row) {
udfColEnsureCapacity(pColumn, row+1);
if (IS_VAR_DATA_TYPE(pColumn->colMeta.type)) {
udfColDataSetNull_var(pColumn, row);
} else {
udfColDataSetNull_f(pColumn, row);
}
pColumn->hasNull = true;
}
static FORCE_INLINE int32_t udfColDataSet(SUdfColumn* pColumn, uint32_t currentRow, const char* pData, bool isNull) {
SUdfColumnMeta *meta = &pColumn->colMeta;
SUdfColumnData *data = &pColumn->colData;
udfColEnsureCapacity(pColumn, currentRow+1);
bool isVarCol = IS_VAR_DATA_TYPE(meta->type);
if (isNull) {
udfColDataSetNull(pColumn, currentRow);
} else {
if (!isVarCol) {
udfColDataSetNotNull_f(pColumn, currentRow);
memcpy(data->fixLenCol.data + meta->bytes * currentRow, pData, meta->bytes);
} else {
int32_t dataLen = varDataTLen(pData);
if (meta->type == TSDB_DATA_TYPE_JSON) {
if (*pData == TSDB_DATA_TYPE_NULL) {
dataLen = 0;
} else if (*pData == TSDB_DATA_TYPE_NCHAR) {
dataLen = varDataTLen(pData + sizeof(char));
} else if (*pData == TSDB_DATA_TYPE_BIGINT || *pData == TSDB_DATA_TYPE_DOUBLE) {
dataLen = sizeof(int64_t);
} else if (*pData == TSDB_DATA_TYPE_BOOL) {
dataLen = sizeof(char);
}
dataLen += sizeof(char);
}
if (data->varLenCol.payloadAllocLen < data->varLenCol.payloadLen + dataLen) {
uint32_t newSize = data->varLenCol.payloadAllocLen;
if (newSize <= 1) {
newSize = 8;
}
while (newSize < data->varLenCol.payloadLen + dataLen) {
newSize = newSize * UDF_MEMORY_EXP_GROWTH;
}
char *buf = (char*)realloc(data->varLenCol.payload, newSize);
if (buf == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
data->varLenCol.payload = buf;
data->varLenCol.payloadAllocLen = newSize;
}
uint32_t len = data->varLenCol.payloadLen;
data->varLenCol.varOffsets[currentRow] = len;
memcpy(data->varLenCol.payload + len, pData, dataLen);
data->varLenCol.payloadLen += dataLen;
}
}
data->numOfRows = (currentRow + 1 > data->numOfRows) ? (currentRow+1) : data->numOfRows;
return 0;
}
typedef int32_t (*TUdfScalarProcFunc)(SUdfDataBlock* block, SUdfColumn *resultCol);
typedef int32_t (*TUdfAggStartFunc)(SUdfInterBuf *buf);
typedef int32_t (*TUdfAggProcessFunc)(SUdfDataBlock* block, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf);
typedef int32_t (*TUdfAggFinishFunc)(SUdfInterBuf* buf, SUdfInterBuf *resultData);
#ifdef __cplusplus
}
#endif
#endif // TDENGINE_TAOSUDF_H
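Since taosudf.h above is the whole interface a UDF sees, a hedged end-to-end sketch may help: a scalar UDF that copies its first input column to the result, preserving NULLs. The function names are illustrative only, and the result column's meta is assumed to match the input (as declared at CREATE FUNCTION time).

#include <taosudf.h>

/* Hedged sketch of a scalar UDF built only on the helpers defined above. */
int32_t copy_col_init() { return 0; }
int32_t copy_col_destroy() { return 0; }

int32_t copy_col(SUdfDataBlock* block, SUdfColumn* resultCol) {
  SUdfColumn* in = block->udfCols[0];
  int32_t code = udfColEnsureCapacity(resultCol, block->numOfRows);
  if (code != TSDB_CODE_SUCCESS) return code;
  for (int32_t i = 0; i < block->numOfRows; ++i) {
    if (udfColDataIsNull(in, i)) {
      udfColDataSet(resultCol, i, NULL, true);   /* pData is ignored for NULLs */
    } else {
      udfColDataSet(resultCol, i, udfColDataGetData(in, i), false);
    }
  }
  return TSDB_CODE_SUCCESS;   /* numOfRows is advanced inside udfColDataSet */
}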
......@@ -16,6 +16,13 @@
#ifndef TDENGINE_TUDF_H
#define TDENGINE_TUDF_H
#undef malloc
#define malloc malloc
#undef free
#define free free
#undef realloc
#define realloc realloc
#include <taosudf.h>
#include <stdint.h>
#include <stdbool.h>
......@@ -36,56 +43,6 @@ extern "C" {
#endif
#define UDF_DNODE_ID_ENV_NAME "DNODE_ID"
//======================================================================================
//begin API to taosd and qworker
typedef struct SUdfColumnMeta {
int16_t type;
int32_t bytes;
uint8_t precision;
uint8_t scale;
} SUdfColumnMeta;
typedef struct SUdfColumnData {
int32_t numOfRows;
int32_t rowsAlloc;
union {
struct {
int32_t nullBitmapLen;
char *nullBitmap;
int32_t dataLen;
char *data;
} fixLenCol;
struct {
int32_t varOffsetsLen;
int32_t *varOffsets;
int32_t payloadLen;
char *payload;
int32_t payloadAllocLen;
} varLenCol;
};
} SUdfColumnData;
typedef struct SUdfColumn {
SUdfColumnMeta colMeta;
bool hasNull;
SUdfColumnData colData;
} SUdfColumn;
typedef struct SUdfDataBlock {
int32_t numOfRows;
int32_t numOfCols;
SUdfColumn **udfCols;
} SUdfDataBlock;
typedef struct SUdfInterBuf {
int32_t bufLen;
char* buf;
int8_t numOfResult; //zero or one
} SUdfInterBuf;
typedef void *UdfcFuncHandle;
//low level APIs
/**
......@@ -127,177 +84,6 @@ int32_t udfAggFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock* pBlock);
int32_t callUdfScalarFunc(char *udfName, SScalarParam *input, int32_t numOfCols, SScalarParam *output);
int32_t cleanUpUdfs();
// end API to taosd and qworker
//=============================================================================================================================
// begin API to UDF writer.
// dynamic lib init and destroy
typedef int32_t (*TUdfInitFunc)();
typedef int32_t (*TUdfDestroyFunc)();
//TODO: add API to check function arguments type, number etc.
#define UDF_MEMORY_EXP_GROWTH 1.5
#define udfColDataIsNull_var(pColumn, row) ((pColumn->colData.varLenCol.varOffsets)[row] == -1)
#define udfColDataIsNull_f(pColumn, row) ((BMCharPos(pColumn->colData.fixLenCol.nullBitmap, row) & (1u << (7u - BitPos(row)))) == (1u << (7u - BitPos(row))))
#define udfColDataSetNull_f(pColumn, row) \
do { \
BMCharPos(pColumn->colData.fixLenCol.nullBitmap, row) |= (1u << (7u - BitPos(row))); \
} while (0)
#define udfColDataSetNotNull_f(pColumn, r_) \
do { \
BMCharPos(pColumn->colData.fixLenCol.nullBitmap, r_) &= ~(1u << (7u - BitPos(r_))); \
} while (0)
#define udfColDataSetNull_var(pColumn, row) ((pColumn->colData.varLenCol.varOffsets)[row] = -1)
static FORCE_INLINE char* udfColDataGetData(const SUdfColumn* pColumn, int32_t row) {
if (IS_VAR_DATA_TYPE(pColumn->colMeta.type)) {
return pColumn->colData.varLenCol.payload + pColumn->colData.varLenCol.varOffsets[row];
} else {
return pColumn->colData.fixLenCol.data + pColumn->colMeta.bytes * row;
}
}
static FORCE_INLINE bool udfColDataIsNull(const SUdfColumn* pColumn, int32_t row) {
if (IS_VAR_DATA_TYPE(pColumn->colMeta.type)) {
if (pColumn->colMeta.type == TSDB_DATA_TYPE_JSON) {
if (udfColDataIsNull_var(pColumn, row)) {
return true;
}
char* data = udfColDataGetData(pColumn, row);
return (*data == TSDB_DATA_TYPE_NULL);
} else {
return udfColDataIsNull_var(pColumn, row);
}
} else {
return udfColDataIsNull_f(pColumn, row);
}
}
static FORCE_INLINE int32_t udfColEnsureCapacity(SUdfColumn* pColumn, int32_t newCapacity) {
SUdfColumnMeta *meta = &pColumn->colMeta;
SUdfColumnData *data = &pColumn->colData;
if (newCapacity== 0 || newCapacity <= data->rowsAlloc) {
return TSDB_CODE_SUCCESS;
}
int allocCapacity = TMAX(data->rowsAlloc, 8);
while (allocCapacity < newCapacity) {
allocCapacity *= UDF_MEMORY_EXP_GROWTH;
}
if (IS_VAR_DATA_TYPE(meta->type)) {
char* tmp = taosMemoryRealloc(data->varLenCol.varOffsets, sizeof(int32_t) * allocCapacity);
if (tmp == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
data->varLenCol.varOffsets = (int32_t*)tmp;
data->varLenCol.varOffsetsLen = sizeof(int32_t) * allocCapacity;
// for payload, add data in udfColDataAppend
} else {
char* tmp = taosMemoryRealloc(data->fixLenCol.nullBitmap, BitmapLen(allocCapacity));
if (tmp == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
data->fixLenCol.nullBitmap = tmp;
data->fixLenCol.nullBitmapLen = BitmapLen(allocCapacity);
if (meta->type == TSDB_DATA_TYPE_NULL) {
return TSDB_CODE_SUCCESS;
}
tmp = taosMemoryRealloc(data->fixLenCol.data, allocCapacity* meta->bytes);
if (tmp == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
data->fixLenCol.data = tmp;
data->fixLenCol.dataLen = allocCapacity* meta->bytes;
}
data->rowsAlloc = allocCapacity;
return TSDB_CODE_SUCCESS;
}
static FORCE_INLINE void udfColDataSetNull(SUdfColumn* pColumn, int32_t row) {
udfColEnsureCapacity(pColumn, row+1);
if (IS_VAR_DATA_TYPE(pColumn->colMeta.type)) {
udfColDataSetNull_var(pColumn, row);
} else {
udfColDataSetNull_f(pColumn, row);
}
pColumn->hasNull = true;
}
static FORCE_INLINE int32_t udfColDataSet(SUdfColumn* pColumn, uint32_t currentRow, const char* pData, bool isNull) {
SUdfColumnMeta *meta = &pColumn->colMeta;
SUdfColumnData *data = &pColumn->colData;
udfColEnsureCapacity(pColumn, currentRow+1);
bool isVarCol = IS_VAR_DATA_TYPE(meta->type);
if (isNull) {
udfColDataSetNull(pColumn, currentRow);
} else {
if (!isVarCol) {
colDataSetNotNull_f(data->fixLenCol.nullBitmap, currentRow);
memcpy(data->fixLenCol.data + meta->bytes * currentRow, pData, meta->bytes);
} else {
int32_t dataLen = varDataTLen(pData);
if (meta->type == TSDB_DATA_TYPE_JSON) {
if (*pData == TSDB_DATA_TYPE_NULL) {
dataLen = 0;
} else if (*pData == TSDB_DATA_TYPE_NCHAR) {
dataLen = varDataTLen(pData + CHAR_BYTES);
} else if (*pData == TSDB_DATA_TYPE_BIGINT || *pData == TSDB_DATA_TYPE_DOUBLE) {
dataLen = LONG_BYTES;
} else if (*pData == TSDB_DATA_TYPE_BOOL) {
dataLen = CHAR_BYTES;
}
dataLen += CHAR_BYTES;
}
if (data->varLenCol.payloadAllocLen < data->varLenCol.payloadLen + dataLen) {
uint32_t newSize = data->varLenCol.payloadAllocLen;
if (newSize <= 1) {
newSize = 8;
}
while (newSize < data->varLenCol.payloadLen + dataLen) {
newSize = newSize * UDF_MEMORY_EXP_GROWTH;
}
char *buf = taosMemoryRealloc(data->varLenCol.payload, newSize);
if (buf == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
data->varLenCol.payload = buf;
data->varLenCol.payloadAllocLen = newSize;
}
uint32_t len = data->varLenCol.payloadLen;
data->varLenCol.varOffsets[currentRow] = len;
memcpy(data->varLenCol.payload + len, pData, dataLen);
data->varLenCol.payloadLen += dataLen;
}
}
data->numOfRows = TMAX(currentRow + 1, data->numOfRows);
return 0;
}
typedef int32_t (*TUdfScalarProcFunc)(SUdfDataBlock* block, SUdfColumn *resultCol);
typedef int32_t (*TUdfAggStartFunc)(SUdfInterBuf *buf);
typedef int32_t (*TUdfAggProcessFunc)(SUdfDataBlock* block, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf);
typedef int32_t (*TUdfAggFinishFunc)(SUdfInterBuf* buf, SUdfInterBuf *resultData);
// end API to UDF writer
//=======================================================================================================================
#ifdef __cplusplus
}
......
......@@ -109,6 +109,9 @@ typedef struct {
char version[MON_VER_LEN];
float master_uptime; // day
int32_t monitor_interval; // sec
int32_t dbs_total;
int32_t stbs_total;
int64_t tbs_total;
int32_t vgroups_total;
int32_t vgroups_alive;
int32_t vnodes_total;
......@@ -135,6 +138,15 @@ typedef struct {
SArray *vgroups; // array of SMonVgroupDesc
} SMonVgroupInfo;
typedef struct {
char stb_name[TSDB_TABLE_NAME_LEN];
char database_name[TSDB_DB_NAME_LEN];
} SMonStbDesc;
typedef struct {
SArray *stbs; // array of SMonStbDesc
} SMonStbInfo;
typedef struct {
int32_t expire_time;
int64_t timeseries_used;
......@@ -144,6 +156,7 @@ typedef struct {
typedef struct {
SMonClusterInfo cluster;
SMonVgroupInfo vgroup;
SMonStbInfo stb;
SMonGrantInfo grant;
SMonSysInfo sys;
SMonLogs log;
......
......@@ -28,6 +28,18 @@ extern "C" {
#define DESCRIBE_RESULT_TYPE_LEN (20 + VARSTR_HEADER_SIZE)
#define DESCRIBE_RESULT_NOTE_LEN (8 + VARSTR_HEADER_SIZE)
#define SHOW_CREATE_DB_RESULT_COLS 2
#define SHOW_CREATE_DB_RESULT_FIELD1_LEN (TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE)
#define SHOW_CREATE_DB_RESULT_FIELD2_LEN (TSDB_MAX_BINARY_LEN + VARSTR_HEADER_SIZE)
#define SHOW_CREATE_TB_RESULT_COLS 2
#define SHOW_CREATE_TB_RESULT_FIELD1_LEN (TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE)
#define SHOW_CREATE_TB_RESULT_FIELD2_LEN (TSDB_MAX_BINARY_LEN + VARSTR_HEADER_SIZE)
#define SHOW_LOCAL_VARIABLES_RESULT_COLS 2
#define SHOW_LOCAL_VARIABLES_RESULT_FIELD1_LEN (TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE)
#define SHOW_LOCAL_VARIABLES_RESULT_FIELD2_LEN (TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE)
#define PRIVILEGE_TYPE_MASK(n) (1 << n)
#define PRIVILEGE_TYPE_ALL PRIVILEGE_TYPE_MASK(0)
......@@ -119,14 +131,14 @@ typedef struct SCreateTableStmt {
} SCreateTableStmt;
typedef struct SCreateSubTableClause {
ENodeType type;
char dbName[TSDB_DB_NAME_LEN];
char tableName[TSDB_TABLE_NAME_LEN];
char useDbName[TSDB_DB_NAME_LEN];
char useTableName[TSDB_TABLE_NAME_LEN];
bool ignoreExists;
SNodeList* pSpecificTags;
SNodeList* pValsOfTags;
ENodeType type;
char dbName[TSDB_DB_NAME_LEN];
char tableName[TSDB_TABLE_NAME_LEN];
char useDbName[TSDB_DB_NAME_LEN];
char useTableName[TSDB_TABLE_NAME_LEN];
bool ignoreExists;
SNodeList* pSpecificTags;
SNodeList* pValsOfTags;
STableOptions* pOptions;
} SCreateSubTableClause;
......@@ -170,13 +182,16 @@ typedef struct SCreateUserStmt {
ENodeType type;
char useName[TSDB_USER_LEN];
char password[TSDB_USET_PASSWORD_LEN];
int8_t sysinfo;
} SCreateUserStmt;
typedef struct SAlterUserStmt {
ENodeType type;
char useName[TSDB_USER_LEN];
char password[TSDB_USET_PASSWORD_LEN];
int8_t alterType;
char password[TSDB_USET_PASSWORD_LEN];
int8_t enable;
int8_t sysinfo;
} SAlterUserStmt;
typedef struct SDropUserStmt {
......@@ -218,10 +233,10 @@ typedef struct SShowCreateDatabaseStmt {
} SShowCreateDatabaseStmt;
typedef struct SShowCreateTableStmt {
ENodeType type;
char dbName[TSDB_DB_NAME_LEN];
char tableName[TSDB_TABLE_NAME_LEN];
STableMeta* pMeta;
ENodeType type;
char dbName[TSDB_DB_NAME_LEN];
char tableName[TSDB_TABLE_NAME_LEN];
void* pCfg; // STableCfg
} SShowCreateTableStmt;
typedef struct SShowTableDistributedStmt {
......@@ -230,6 +245,11 @@ typedef struct SShowTableDistributedStmt {
char tableName[TSDB_TABLE_NAME_LEN];
} SShowTableDistributedStmt;
typedef struct SShowDnodeVariablesStmt {
ENodeType type;
SNode* pDnodeId;
} SShowDnodeVariablesStmt;
typedef enum EIndexType { INDEX_TYPE_SMA = 1, INDEX_TYPE_FULLTEXT } EIndexType;
typedef struct SIndexOptions {
......@@ -238,6 +258,7 @@ typedef struct SIndexOptions {
SNode* pInterval;
SNode* pOffset;
SNode* pSliding;
SNode* pStreamOptions;
} SIndexOptions;
typedef struct SCreateIndexStmt {
......@@ -254,7 +275,6 @@ typedef struct SDropIndexStmt {
ENodeType type;
bool ignoreNotExists;
char indexName[TSDB_INDEX_NAME_LEN];
char tableName[TSDB_TABLE_NAME_LEN];
} SDropIndexStmt;
typedef struct SCreateComponentNodeStmt {
......@@ -273,6 +293,7 @@ typedef struct SCreateTopicStmt {
char subDbName[TSDB_DB_NAME_LEN];
char subSTbName[TSDB_TABLE_NAME_LEN];
bool ignoreExists;
bool withMeta;
SNode* pQuery;
} SCreateTopicStmt;
......
......@@ -59,10 +59,10 @@ extern "C" {
for (SListCell* cell = (NULL != (list) ? (list)->pHead : NULL); \
(NULL != cell ? (node = &(cell->pNode), true) : (node = NULL, false)); cell = cell->pNext)
#define DESTORY_LIST(list) \
do { \
nodesDestroyList((list)); \
(list) = NULL; \
#define NODES_DESTORY_LIST(list) \
do { \
nodesDestroyList((list)); \
(list) = NULL; \
} while (0)
#define NODES_CLEAR_LIST(list) \
......@@ -180,12 +180,15 @@ typedef enum ENodeType {
QUERY_NODE_SHOW_VNODES_STMT,
QUERY_NODE_SHOW_APPS_STMT,
QUERY_NODE_SHOW_SCORES_STMT,
QUERY_NODE_SHOW_VARIABLE_STMT,
QUERY_NODE_SHOW_VARIABLES_STMT,
QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT,
QUERY_NODE_SHOW_DNODE_VARIABLES_STMT,
QUERY_NODE_SHOW_CREATE_DATABASE_STMT,
QUERY_NODE_SHOW_CREATE_TABLE_STMT,
QUERY_NODE_SHOW_CREATE_STABLE_STMT,
QUERY_NODE_SHOW_TRANSACTIONS_STMT,
QUERY_NODE_SHOW_TABLE_DISTRIBUTED_STMT,
QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT,
QUERY_NODE_KILL_CONNECTION_STMT,
QUERY_NODE_KILL_QUERY_STMT,
QUERY_NODE_KILL_TRANSACTION_STMT,
......@@ -217,14 +220,17 @@ typedef enum ENodeType {
QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN,
QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN,
QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN,
QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN,
QUERY_NODE_PHYSICAL_PLAN_PROJECT,
QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN,
QUERY_NODE_PHYSICAL_PLAN_HASH_AGG,
QUERY_NODE_PHYSICAL_PLAN_EXCHANGE,
QUERY_NODE_PHYSICAL_PLAN_MERGE,
QUERY_NODE_PHYSICAL_PLAN_SORT,
QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT,
QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL,
......@@ -279,6 +285,7 @@ int32_t nodesListPushFront(SNodeList* pList, SNode* pNode);
SListCell* nodesListErase(SNodeList* pList, SListCell* pCell);
void nodesListInsertList(SNodeList* pTarget, SListCell* pPos, SNodeList* pSrc);
SNode* nodesListGetNode(SNodeList* pList, int32_t index);
SListCell* nodesListGetCell(SNodeList* pList, int32_t index);
void nodesDestroyList(SNodeList* pList);
// Only clear the linked list structure, without releasing the elements inside
void nodesClearList(SNodeList* pList);
......
......@@ -24,6 +24,8 @@ extern "C" {
#include "querynodes.h"
#include "tname.h"
#define SLOT_NAME_LEN TSDB_TABLE_NAME_LEN + TSDB_COL_NAME_LEN
typedef struct SLogicNode {
ENodeType type;
SNodeList* pTargets; // SColumnNode
......@@ -32,6 +34,8 @@ typedef struct SLogicNode {
struct SLogicNode* pParent;
int32_t optimizedFlag;
uint8_t precision;
SNode* pLimit;
SNode* pSlimit;
} SLogicNode;
typedef enum EScanType {
......@@ -40,7 +44,8 @@ typedef enum EScanType {
SCAN_TYPE_SYSTEM_TABLE,
SCAN_TYPE_STREAM,
SCAN_TYPE_TABLE_MERGE,
SCAN_TYPE_BLOCK_INFO
SCAN_TYPE_BLOCK_INFO,
SCAN_TYPE_LAST_ROW
} EScanType;
typedef struct SScanLogicNode {
......@@ -65,12 +70,14 @@ typedef struct SScanLogicNode {
int8_t intervalUnit;
int8_t slidingUnit;
SNode* pTagCond;
SNode* pTagIndexCond;
int8_t triggerType;
int64_t watermark;
int16_t tsColId;
double filesFactor;
SArray* pSmaIndexes;
SNodeList* pPartTags;
SNodeList* pGroupTags;
bool groupSort;
} SScanLogicNode;
typedef struct SJoinLogicNode {
......@@ -90,15 +97,12 @@ typedef struct SProjectLogicNode {
SLogicNode node;
SNodeList* pProjections;
char stmtName[TSDB_TABLE_NAME_LEN];
int64_t limit;
int64_t offset;
int64_t slimit;
int64_t soffset;
} SProjectLogicNode;
typedef struct SIndefRowsFuncLogicNode {
SLogicNode node;
SNodeList* pFuncs;
bool isTailFunc;
} SIndefRowsFuncLogicNode;
typedef struct SInterpFuncLogicNode {
......@@ -137,6 +141,7 @@ typedef struct SMergeLogicNode {
SNodeList* pInputs;
int32_t numOfChannels;
int32_t srcGroupId;
bool groupSort;
} SMergeLogicNode;
typedef enum EWindowType { WINDOW_TYPE_INTERVAL = 1, WINDOW_TYPE_SESSION, WINDOW_TYPE_STATE } EWindowType;
......@@ -183,6 +188,7 @@ typedef struct SFillLogicNode {
typedef struct SSortLogicNode {
SLogicNode node;
SNodeList* pSortKeys;
bool groupSort;
} SSortLogicNode;
typedef struct SPartitionLogicNode {
......@@ -229,6 +235,7 @@ typedef struct SSlotDescNode {
bool reserve;
bool output;
bool tag;
char name[SLOT_NAME_LEN];
} SSlotDescNode;
typedef struct SDataBlockDescNode {
......@@ -246,6 +253,8 @@ typedef struct SPhysiNode {
SNode* pConditions;
SNodeList* pChildren;
struct SPhysiNode* pParent;
SNode* pLimit;
SNode* pSlimit;
} SPhysiNode;
typedef struct SScanPhysiNode {
......@@ -260,6 +269,7 @@ typedef struct SScanPhysiNode {
typedef SScanPhysiNode STagScanPhysiNode;
typedef SScanPhysiNode SBlockDistScanPhysiNode;
typedef SScanPhysiNode SLastRowScanPhysiNode;
typedef struct SSystemTableScanPhysiNode {
SScanPhysiNode scan;
......@@ -275,7 +285,8 @@ typedef struct STableScanPhysiNode {
double ratio;
int32_t dataRequired;
SNodeList* pDynamicScanFuncs;
SNodeList* pPartitionTags;
SNodeList* pGroupTags;
bool groupSort;
int64_t interval;
int64_t offset;
int64_t sliding;
......@@ -294,10 +305,6 @@ typedef STableScanPhysiNode SStreamScanPhysiNode;
typedef struct SProjectPhysiNode {
SPhysiNode node;
SNodeList* pProjections;
int64_t limit;
int64_t offset;
int64_t slimit;
int64_t soffset;
} SProjectPhysiNode;
typedef struct SIndefRowsFuncPhysiNode {
......@@ -353,6 +360,7 @@ typedef struct SMergePhysiNode {
SNodeList* pTargets;
int32_t numOfChannels;
int32_t srcGroupId;
bool groupSort;
} SMergePhysiNode;
typedef struct SWinodwPhysiNode {
......@@ -376,6 +384,7 @@ typedef struct SIntervalPhysiNode {
} SIntervalPhysiNode;
typedef SIntervalPhysiNode SMergeIntervalPhysiNode;
typedef SIntervalPhysiNode SMergeAlignedIntervalPhysiNode;
typedef SIntervalPhysiNode SStreamIntervalPhysiNode;
typedef SIntervalPhysiNode SStreamFinalIntervalPhysiNode;
typedef SIntervalPhysiNode SStreamSemiIntervalPhysiNode;
......@@ -417,6 +426,8 @@ typedef struct SSortPhysiNode {
SNodeList* pTargets;
} SSortPhysiNode;
typedef SSortPhysiNode SGroupSortPhysiNode;
typedef struct SPartitionPhysiNode {
SPhysiNode node;
SNodeList* pExprs; // these are expression list of partition_by_clause
......@@ -456,6 +467,7 @@ typedef struct SSubplan {
int32_t msgType; // message type for subplan, used to denote the send message type to vnode.
int32_t level; // the execution level of current subplan, starting from 0 in a top-down manner.
char dbFName[TSDB_DB_FNAME_LEN];
char user[TSDB_USER_LEN];
SQueryNodeAddr execNode; // for the scan/modify subplan, the optional execution node
SQueryNodeStat execNodeStat; // only for scan subplan
SNodeList* pChildren; // the datasource subplan,from which to fetch the result
......@@ -463,6 +475,7 @@ typedef struct SSubplan {
SPhysiNode* pNode; // physical plan of current subplan
SDataSinkNode* pDataSink; // data of the subplan flow into the datasink
SNode* pTagCond;
SNode* pTagIndexCond;
} SSubplan;
typedef enum EExplainMode { EXPLAIN_MODE_DISABLE = 1, EXPLAIN_MODE_STATIC, EXPLAIN_MODE_ANALYZE } EExplainMode;
......
......@@ -52,7 +52,7 @@ typedef struct SExprNode {
SArray* pAssociation;
} SExprNode;
typedef enum EColumnType { COLUMN_TYPE_COLUMN = 1, COLUMN_TYPE_TAG } EColumnType;
typedef enum EColumnType { COLUMN_TYPE_COLUMN = 1, COLUMN_TYPE_TAG, COLUMN_TYPE_TBNAME } EColumnType;
typedef struct SColumnNode {
SExprNode node; // QUERY_NODE_COLUMN
......@@ -258,6 +258,8 @@ typedef struct SSelectStmt {
bool hasUniqueFunc;
bool hasTailFunc;
bool hasInterpFunc;
bool hasLastRowFunc;
bool groupSort;
} SSelectStmt;
typedef enum ESetOperatorType { SET_OP_TYPE_UNION_ALL = 1, SET_OP_TYPE_UNION } ESetOperatorType;
......@@ -287,11 +289,11 @@ typedef enum ESqlClause {
} ESqlClause;
typedef struct SDeleteStmt {
ENodeType type; // QUERY_NODE_DELETE_STMT
SNode* pFromTable; // FROM clause
SNode* pWhere; // WHERE clause
SNode* pCountFunc; // count the number of rows affected
SNode* pTagIndexCond; // pWhere divided into pTagIndexCond and timeRange
ENodeType type; // QUERY_NODE_DELETE_STMT
SNode* pFromTable; // FROM clause
SNode* pWhere; // WHERE clause
SNode* pCountFunc; // count the number of rows affected
SNode* pTagCond; // pWhere divided into pTagCond and timeRange
STimeWindow timeRange;
uint8_t precision;
bool deleteZeroRows;
......@@ -384,6 +386,7 @@ bool nodesIsArithmeticOp(const SOperatorNode* pOp);
bool nodesIsComparisonOp(const SOperatorNode* pOp);
bool nodesIsJsonOp(const SOperatorNode* pOp);
bool nodesIsRegularOp(const SOperatorNode* pOp);
bool nodesIsBitwiseOp(const SOperatorNode* pOp);
bool nodesExprHasColumn(SNode* pNode);
bool nodesExprsHasColumn(SNodeList* pList);
......@@ -395,7 +398,8 @@ void nodesValueNodeToVariant(const SValueNode* pNode, SVariant* pVal);
char* nodesGetFillModeString(EFillMode mode);
int32_t nodesMergeConds(SNode** pDst, SNodeList** pSrc);
int32_t nodesPartitionCond(SNode** pCondition, SNode** pPrimaryKeyCond, SNode** pTagCond, SNode** pOtherCond);
int32_t nodesPartitionCond(SNode** pCondition, SNode** pPrimaryKeyCond, SNode** pTagIndexCond, SNode** pTagCond,
SNode** pOtherCond);
#ifdef __cplusplus
}
......
......@@ -51,6 +51,8 @@ typedef struct SParseContext {
bool isSuperUser;
bool async;
int8_t schemalessType;
const char* svrVer;
bool nodeOffline;
} SParseContext;
int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery);
......
......@@ -24,19 +24,19 @@ extern "C" {
#include "taos.h"
typedef struct SPlanContext {
uint64_t queryId;
int32_t acctId;
SEpSet mgmtEpSet;
SNode* pAstRoot;
bool topicQuery;
bool streamQuery;
bool rSmaQuery;
bool showRewrite;
int8_t triggerType;
int64_t watermark;
char* pMsg;
int32_t msgLen;
// double filesFactor;
uint64_t queryId;
int32_t acctId;
SEpSet mgmtEpSet;
SNode* pAstRoot;
bool topicQuery;
bool streamQuery;
bool rSmaQuery;
bool showRewrite;
int8_t triggerType;
int64_t watermark;
char* pMsg;
int32_t msgLen;
const char* pUser;
} SPlanContext;
// Create the physical plan for the query, according to the AST.
......
......@@ -16,6 +16,7 @@
#ifndef _TD_QUERY_H_
#define _TD_QUERY_H_
// clang-foramt off
#ifdef __cplusplus
extern "C" {
#endif
......@@ -71,7 +72,7 @@ typedef struct SIndexMeta {
} SIndexMeta;
typedef struct STbVerInfo {
char tbFName[TSDB_TABLE_FNAME_LEN];
char tbFName[TSDB_TABLE_FNAME_LEN];
int32_t sversion;
int32_t tversion;
} STbVerInfo;
......@@ -141,7 +142,7 @@ typedef struct SDataBuf {
typedef struct STargetInfo {
ETargetType type;
char* dbFName; // used to update db's vgroup epset
char* dbFName; // used to update db's vgroup epset
int32_t vgId;
} STargetInfo;
......@@ -149,15 +150,15 @@ typedef int32_t (*__async_send_cb_fn_t)(void* param, const SDataBuf* pMsg, int32
typedef int32_t (*__async_exec_fn_t)(void* param);
typedef struct SRequestConnInfo {
void* pTrans;
uint64_t requestId;
int64_t requestObjRefId;
SEpSet mgmtEps;
void* pTrans;
uint64_t requestId;
int64_t requestObjRefId;
SEpSet mgmtEps;
} SRequestConnInfo;
typedef struct SMsgSendInfo {
__async_send_cb_fn_t fp; // async callback function
STargetInfo target; // for update epset
__async_send_cb_fn_t fp; // async callback function
STargetInfo target; // for update epset
void* param;
uint64_t requestId;
uint64_t requestObjRefId;
......@@ -206,9 +207,15 @@ int32_t queryCreateTableMetaFromMsg(STableMetaRsp* msg, bool isSuperTable, STabl
char* jobTaskStatusStr(int32_t status);
SSchema createSchema(int8_t type, int32_t bytes, col_id_t colId, const char* name);
void destroyQueryExecRes(SQueryExecRes* pRes);
extern int32_t (*queryBuildMsg[TDMT_MAX])(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void*(*mallocFp)(int32_t));
void destroyQueryExecRes(SQueryExecRes* pRes);
int32_t dataConverToStr(char* str, int type, void* buf, int32_t bufSize, int32_t* len);
char* parseTagDatatoJson(void* p);
int32_t cloneTableMeta(STableMeta* pSrc, STableMeta** pDst);
int32_t cloneDbVgInfo(SDBVgInfo* pSrc, SDBVgInfo** pDst);
extern int32_t (*queryBuildMsg[TDMT_MAX])(void* input, char** msg, int32_t msgSize, int32_t* msgLen,
void* (*mallocFp)(int32_t));
extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t msgSize);
#define SET_META_TYPE_NULL(t) (t) = META_TYPE_NULL_TABLE
......@@ -219,7 +226,7 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t
#define NEED_CLIENT_RM_TBLMETA_ERROR(_code) \
((_code) == TSDB_CODE_PAR_TABLE_NOT_EXIST || (_code) == TSDB_CODE_VND_TB_NOT_EXIST || \
(_code) == TSDB_CODE_PAR_INVALID_COLUMNS_NUM || (_code) == TSDB_CODE_PAR_INVALID_COLUMN || \
(_code) == TSDB_CODE_PAR_TAGS_NOT_MATCHED || (_code) == TSDB_CODE_PAR_VALUE_TOO_LONG || \
(_code) == TSDB_CODE_PAR_TAGS_NOT_MATCHED || (_code) == TSDB_CODE_PAR_VALUE_TOO_LONG || \
(_code) == TSDB_CODE_PAR_INVALID_DROP_COL || ((_code) == TSDB_CODE_TDB_INVALID_TABLE_ID))
#define NEED_CLIENT_REFRESH_VG_ERROR(_code) \
((_code) == TSDB_CODE_VND_HASH_MISMATCH || (_code) == TSDB_CODE_VND_INVALID_VGROUP_ID)
......@@ -227,11 +234,13 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t
#define NEED_CLIENT_HANDLE_ERROR(_code) \
(NEED_CLIENT_RM_TBLMETA_ERROR(_code) || NEED_CLIENT_REFRESH_VG_ERROR(_code) || \
NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code))
#define NEED_CLIENT_RM_TBLMETA_REQ(_type) ((_type) == TDMT_VND_CREATE_TABLE || (_type) == TDMT_VND_CREATE_STB \
|| (_type) == TDMT_VND_DROP_TABLE || (_type) == TDMT_VND_DROP_STB)
#define NEED_CLIENT_RM_TBLMETA_REQ(_type) \
((_type) == TDMT_VND_CREATE_TABLE || (_type) == TDMT_VND_CREATE_STB || (_type) == TDMT_VND_DROP_TABLE || \
(_type) == TDMT_VND_DROP_STB)
#define NEED_SCHEDULER_RETRY_ERROR(_code) \
((_code) == TSDB_CODE_RPC_REDIRECT || (_code) == TSDB_CODE_RPC_NETWORK_UNAVAIL || (_code) == TSDB_CODE_SCH_TIMEOUT_ERROR)
#define NEED_SCHEDULER_RETRY_ERROR(_code) \
((_code) == TSDB_CODE_RPC_REDIRECT || (_code) == TSDB_CODE_RPC_NETWORK_UNAVAIL || \
(_code) == TSDB_CODE_SCH_TIMEOUT_ERROR)
#define REQUEST_TOTAL_EXEC_TIMES 2
......@@ -262,19 +271,19 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t
#define qDebug(...) \
do { \
if (qDebugFlag & DEBUG_DEBUG) { \
taosPrintLog("QRY ", DEBUG_DEBUG, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
taosPrintLog("QRY ", DEBUG_DEBUG, qDebugFlag, __VA_ARGS__); \
} \
} while (0)
#define qTrace(...) \
do { \
if (qDebugFlag & DEBUG_TRACE) { \
taosPrintLog("QRY ", DEBUG_TRACE, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
taosPrintLog("QRY ", DEBUG_TRACE, qDebugFlag, __VA_ARGS__); \
} \
} while (0)
#define qDebugL(...) \
do { \
if (qDebugFlag & DEBUG_DEBUG) { \
taosPrintLongString("QRY ", DEBUG_DEBUG, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
taosPrintLongString("QRY ", DEBUG_DEBUG, qDebugFlag, __VA_ARGS__); \
} \
} while (0)
......@@ -308,3 +317,4 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t
#endif
#endif /*_TD_QUERY_H_*/
// clang-foramt on
......@@ -32,7 +32,7 @@ enum {
};
typedef struct SDeleteRes {
uint64_t uid;
uint64_t suid;
SArray* uidList;
int64_t skey;
int64_t ekey;
......
......@@ -30,9 +30,14 @@ extern "C" {
typedef struct SStreamTask SStreamTask;
enum {
TASK_STATUS__IDLE = 1,
TASK_STATUS__EXECUTING,
TASK_STATUS__CLOSING,
TASK_STATUS__NORMAL = 0,
TASK_STATUS__DROPPING,
};
enum {
TASK_EXEC_STATUS__IDLE = 1,
TASK_EXEC_STATUS__EXECUTING,
TASK_EXEC_STATUS__CLOSING,
};
enum {
......@@ -50,16 +55,13 @@ enum {
TASK_OUTPUT_STATUS__BLOCKED,
};
enum {
STREAM_CREATED_BY__USER = 1,
STREAM_CREATED_BY__SMA,
};
enum {
STREAM_INPUT__DATA_SUBMIT = 1,
STREAM_INPUT__DATA_BLOCK,
STREAM_INPUT__DATA_RETRIEVE,
STREAM_INPUT__TRIGGER,
STREAM_INPUT__CHECKPOINT,
STREAM_INPUT__DROP,
};
typedef struct {
......@@ -76,7 +78,7 @@ typedef struct {
typedef struct {
int8_t type;
int32_t sourceVg;
int32_t srcVgId;
int64_t sourceVer;
SArray* blocks; // SArray<SSDataBlock*>
......@@ -144,15 +146,9 @@ void streamDataSubmitRefDec(SStreamDataSubmit* pDataSubmit);
SStreamDataSubmit* streamSubmitRefClone(SStreamDataSubmit* pSubmit);
#if 0
int32_t streamDataBlockEncode(void** buf, const SStreamDataBlock* pOutput);
void* streamDataBlockDecode(const void* buf, SStreamDataBlock* pInput);
#endif
typedef struct {
char* qmsg;
// followings are not applicable to encoder and decoder
void* inputHandle;
void* executor;
} STaskExec;
......@@ -234,23 +230,33 @@ enum {
TASK_TRIGGER_STATUS__ACTIVE,
};
typedef struct {
int32_t nodeId;
int32_t childId;
int32_t taskId;
SEpSet epSet;
} SStreamChildEpInfo;
struct SStreamTask {
int64_t streamId;
int32_t taskId;
int8_t inputType;
int8_t status;
int8_t sourceType;
int8_t isDataScan;
int8_t execType;
int8_t sinkType;
int8_t dispatchType;
int16_t dispatchMsgType;
int8_t taskStatus;
int8_t execStatus;
// node info
int32_t childId;
int32_t selfChildId;
int32_t nodeId;
SEpSet epSet;
// children info
SArray* childEpInfo; // SArray<SStreamChildEpInfo*>
// exec
STaskExec exec;
......@@ -288,6 +294,9 @@ struct SStreamTask {
SMsgCb* pMsgCb;
};
int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo);
int32_t tDecodeStreamEpInfo(SDecoder* pDecoder, SStreamChildEpInfo* pInfo);
SStreamTask* tNewSStreamTask(int64_t streamId);
int32_t tEncodeSStreamTask(SEncoder* pEncoder, const SStreamTask* pTask);
int32_t tDecodeSStreamTask(SDecoder* pDecoder, SStreamTask* pTask);
......@@ -310,7 +319,7 @@ static FORCE_INLINE int32_t streamTaskInput(SStreamTask* pTask, SStreamQueueItem
return -1;
}
taosWriteQitem(pTask->inputQueue->queue, pSubmitClone);
} else if (pItem->type == STREAM_INPUT__DATA_BLOCK) {
} else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE) {
taosWriteQitem(pTask->inputQueue->queue, pItem);
} else if (pItem->type == STREAM_INPUT__CHECKPOINT) {
taosWriteQitem(pTask->inputQueue->queue, pItem);
......@@ -336,10 +345,12 @@ static FORCE_INLINE int32_t streamTaskOutput(SStreamTask* pTask, SStreamDataBloc
if (pTask->sinkType == TASK_SINK__TABLE) {
ASSERT(pTask->dispatchType == TASK_DISPATCH__NONE);
pTask->tbSink.tbSinkFunc(pTask, pTask->tbSink.vnode, 0, pBlock->blocks);
taosArrayDestroyEx(pBlock->blocks, (FDelete)tDeleteSSDataBlock);
taosFreeQitem(pBlock);
} else if (pTask->sinkType == TASK_SINK__SMA) {
ASSERT(pTask->dispatchType == TASK_DISPATCH__NONE);
pTask->smaSink.smaSink(pTask->smaSink.vnode, pTask->smaSink.smaId, pBlock->blocks);
taosArrayDestroyEx(pBlock->blocks, (FDelete)tDeleteSSDataBlock);
taosFreeQitem(pBlock);
} else {
ASSERT(pTask->dispatchType != TASK_DISPATCH__NONE);
......@@ -366,9 +377,9 @@ typedef struct {
typedef struct {
int64_t streamId;
int32_t taskId;
int32_t sourceTaskId;
int32_t sourceVg;
int32_t sourceChildId;
int32_t dataSrcVgId;
int32_t upstreamTaskId;
int32_t upstreamChildId;
int32_t upstreamNodeId;
#if 0
int64_t sourceVer;
......@@ -384,6 +395,23 @@ typedef struct {
int8_t inputStatus;
} SStreamDispatchRsp;
typedef struct {
int64_t streamId;
int32_t srcTaskId;
int32_t srcNodeId;
int32_t dstTaskId;
int32_t dstNodeId;
int32_t retrieveLen;
SRetrieveTableRsp* pRetrieve;
} SStreamRetrieveReq;
typedef struct {
int64_t streamId;
int32_t childId;
int32_t rspFromTaskId;
int32_t rspToTaskId;
} SStreamRetrieveRsp;
typedef struct {
int64_t streamId;
int32_t taskId;
......@@ -398,18 +426,20 @@ typedef struct {
} SStreamTaskRecoverRsp;
int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq);
int32_t tDecodeStreamRetrieveReq(SDecoder* pDecoder, SStreamRetrieveReq* pReq);
int32_t streamLaunchByWrite(SStreamTask* pTask, int32_t vgId, SMsgCb* pMsgCb);
int32_t streamLaunchByWrite(SStreamTask* pTask, int32_t vgId);
int32_t streamSetupTrigger(SStreamTask* pTask);
int32_t streamTaskRun(SStreamTask* pTask);
int32_t streamTaskProcessRunReq(SStreamTask* pTask, SMsgCb* pMsgCb);
int32_t streamProcessDispatchReq(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamDispatchReq* pReq, SRpcMsg* pMsg);
int32_t streamProcessDispatchRsp(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamDispatchRsp* pRsp);
int32_t streamProcessRecoverReq(SStreamTask* pTask, SMsgCb* pMsgCb, SStreamTaskRecoverReq* pReq, SRpcMsg* pMsg);
int32_t streamProcessRunReq(SStreamTask* pTask);
int32_t streamProcessDispatchReq(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pMsg);
int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp);
int32_t streamProcessRecoverReq(SStreamTask* pTask, SStreamTaskRecoverReq* pReq, SRpcMsg* pMsg);
int32_t streamProcessRecoverRsp(SStreamTask* pTask, SStreamTaskRecoverRsp* pRsp);
int32_t streamProcessRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* pReq, SRpcMsg* pMsg);
int32_t streamProcessRetrieveRsp(SStreamTask* pTask, SStreamRetrieveRsp* pRsp);
#ifdef __cplusplus
}
#endif
......
......@@ -32,12 +32,15 @@ typedef struct SUpdateInfo {
int64_t interval;
int64_t watermark;
TSKEY minTS;
SScalableBf* pCloseWinSBF;
} SUpdateInfo;
SUpdateInfo *updateInfoInitP(SInterval* pInterval, int64_t watermark);
SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t watermark);
bool updateInfoIsUpdated(SUpdateInfo *pInfo, tb_uid_t tableId, TSKEY ts);
void updateInfoDestroy(SUpdateInfo *pInfo);
void updateInfoAddCloseWindowSBF(SUpdateInfo *pInfo);
void updateInfoDestoryColseWinSBF(SUpdateInfo *pInfo);
#ifdef __cplusplus
}
......
......@@ -24,8 +24,11 @@ extern "C" {
#include "tdef.h"
#include "tmsgcb.h"
extern bool gRaftDetailLog;
#define SYNC_INDEX_BEGIN 0
#define SYNC_INDEX_INVALID -1
#define SYNC_TERM_INVALID 0xFFFFFFFFFFFFFFFF
typedef uint64_t SyncNodeId;
typedef int32_t SyncGroupId;
......@@ -61,28 +64,35 @@ typedef struct SSyncCfg {
} SSyncCfg;
typedef struct SFsmCbMeta {
int32_t code;
SyncIndex index;
SyncTerm term;
uint64_t seqNum;
SyncIndex lastConfigIndex;
bool isWeak;
int32_t code;
ESyncState state;
uint64_t seqNum;
SyncTerm term;
SyncTerm currentTerm;
bool isWeak;
uint64_t flag;
} SFsmCbMeta;
typedef struct SReConfigCbMeta {
int32_t code;
SyncIndex index;
SyncTerm term;
SyncIndex lastConfigIndex;
SyncTerm currentTerm;
int32_t code;
SyncIndex index;
SyncTerm term;
uint64_t seqNum;
SyncIndex lastConfigIndex;
ESyncState state;
SyncTerm currentTerm;
bool isWeak;
uint64_t flag;
// config info
SSyncCfg oldCfg;
SSyncCfg newCfg;
bool isDrop;
uint64_t flag;
uint64_t seqNum;
SyncIndex newCfgIndex;
SyncTerm newCfgTerm;
uint64_t newCfgSeqNum;
} SReConfigCbMeta;
typedef struct SSnapshot {
......@@ -107,8 +117,7 @@ typedef struct SSyncFSM {
void (*FpReConfigCb)(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SReConfigCbMeta cbMeta);
void (*FpLeaderTransferCb)(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta);
int32_t (*FpGetSnapshot)(struct SSyncFSM* pFsm, SSnapshot* pSnapshot, void *pReaderParam, void** ppReader);
int32_t (*FpGetSnapshot)(struct SSyncFSM* pFsm, SSnapshot* pSnapshot, void* pReaderParam, void** ppReader);
int32_t (*FpGetSnapshotInfo)(struct SSyncFSM* pFsm, SSnapshot* pSnapshot);
int32_t (*FpSnapshotStartRead)(struct SSyncFSM* pFsm, void** ppReader);
......@@ -148,13 +157,13 @@ typedef struct SSyncLogStore {
SyncIndex (*getCommitIndex)(struct SSyncLogStore* pLogStore);
// refactor, log[0 .. n] ==> log[m .. n]
int32_t (*syncLogSetBeginIndex)(struct SSyncLogStore* pLogStore, SyncIndex beginIndex);
int32_t (*syncLogResetBeginIndex)(struct SSyncLogStore* pLogStore);
// int32_t (*syncLogSetBeginIndex)(struct SSyncLogStore* pLogStore, SyncIndex beginIndex);
SyncIndex (*syncLogBeginIndex)(struct SSyncLogStore* pLogStore);
SyncIndex (*syncLogEndIndex)(struct SSyncLogStore* pLogStore);
bool (*syncLogIsEmpty)(struct SSyncLogStore* pLogStore);
int32_t (*syncLogEntryCount)(struct SSyncLogStore* pLogStore);
bool (*syncLogInRange)(struct SSyncLogStore* pLogStore, SyncIndex index);
int32_t (*syncLogRestoreFromSnapshot)(struct SSyncLogStore* pLogStore, SyncIndex index);
SyncIndex (*syncLogWriteIndex)(struct SSyncLogStore* pLogStore);
SyncIndex (*syncLogLastIndex)(struct SSyncLogStore* pLogStore);
......@@ -189,14 +198,14 @@ ESyncState syncGetMyRole(int64_t rid);
bool syncIsReady(int64_t rid);
const char* syncGetMyRoleStr(int64_t rid);
SyncTerm syncGetMyTerm(int64_t rid);
SyncGroupId syncGetVgId(int64_t rid);
void syncGetEpSet(int64_t rid, SEpSet* pEpSet);
int32_t syncGetVgId(int64_t rid);
int32_t syncPropose(int64_t rid, const SRpcMsg* pMsg, bool isWeak);
void syncGetRetryEpSet(int64_t rid, SEpSet* pEpSet);
int32_t syncPropose(int64_t rid, SRpcMsg* pMsg, bool isWeak);
bool syncEnvIsStart();
const char* syncStr(ESyncState state);
bool syncIsRestoreFinish(int64_t rid);
int32_t syncReconfig(int64_t rid, const SSyncCfg* pNewCfg);
// build SRpcMsg, need to call syncPropose with SRpcMsg
......
......@@ -43,7 +43,7 @@ void setElectTimerMS(int64_t rid, int32_t electTimerMS);
void setHeartbeatTimerMS(int64_t rid, int32_t hbTimerMS);
// for compatibility, the same as syncPropose
int32_t syncForwardToPeer(int64_t rid, const SRpcMsg* pMsg, bool isWeak);
int32_t syncForwardToPeer(int64_t rid, SRpcMsg* pMsg, bool isWeak);
// utils
const char* syncUtilState2String(ESyncState state);
......@@ -468,7 +468,7 @@ typedef struct SyncLeaderTransfer {
SRaftId destId;
*/
SNodeInfo newNodeInfo;
SRaftId newLeaderId;
SRaftId newLeaderId;
} SyncLeaderTransfer;
SyncLeaderTransfer* syncLeaderTransferBuild(int32_t vgId);
......@@ -489,11 +489,42 @@ void syncLeaderTransferPrint2(char* s, const SyncLeaderTransfer* pMsg);
void syncLeaderTransferLog(const SyncLeaderTransfer* pMsg);
void syncLeaderTransferLog2(char* s, const SyncLeaderTransfer* pMsg);
// ---------------------------------------------
typedef struct SyncReconfigFinish {
uint32_t bytes;
int32_t vgId;
uint32_t msgType;
SSyncCfg oldCfg;
SSyncCfg newCfg;
SyncIndex newCfgIndex;
SyncTerm newCfgTerm;
uint64_t newCfgSeqNum;
} SyncReconfigFinish;
SyncReconfigFinish* syncReconfigFinishBuild(int32_t vgId);
void syncReconfigFinishDestroy(SyncReconfigFinish* pMsg);
void syncReconfigFinishSerialize(const SyncReconfigFinish* pMsg, char* buf, uint32_t bufLen);
void syncReconfigFinishDeserialize(const char* buf, uint32_t len, SyncReconfigFinish* pMsg);
char* syncReconfigFinishSerialize2(const SyncReconfigFinish* pMsg, uint32_t* len);
SyncReconfigFinish* syncReconfigFinishDeserialize2(const char* buf, uint32_t len);
void syncReconfigFinish2RpcMsg(const SyncReconfigFinish* pMsg, SRpcMsg* pRpcMsg);
void syncReconfigFinishFromRpcMsg(const SRpcMsg* pRpcMsg, SyncReconfigFinish* pMsg);
SyncReconfigFinish* syncReconfigFinishFromRpcMsg2(const SRpcMsg* pRpcMsg);
cJSON* syncReconfigFinish2Json(const SyncReconfigFinish* pMsg);
char* syncReconfigFinish2Str(const SyncReconfigFinish* pMsg);
// for debug ----------------------
void syncReconfigFinishPrint(const SyncReconfigFinish* pMsg);
void syncReconfigFinishPrint2(char* s, const SyncReconfigFinish* pMsg);
void syncReconfigFinishLog(const SyncReconfigFinish* pMsg);
void syncReconfigFinishLog2(char* s, const SyncReconfigFinish* pMsg);
// on message ----------------------
int32_t syncNodeOnPingCb(SSyncNode* ths, SyncPing* pMsg);
int32_t syncNodeOnPingReplyCb(SSyncNode* ths, SyncPingReply* pMsg);
int32_t syncNodeOnTimeoutCb(SSyncNode* ths, SyncTimeout* pMsg);
int32_t syncNodeOnClientRequestCb(SSyncNode* ths, SyncClientRequest* pMsg);
int32_t syncNodeOnClientRequestCb(SSyncNode* ths, SyncClientRequest* pMsg, SyncIndex* pRetIndex);
int32_t syncNodeOnRequestVoteCb(SSyncNode* ths, SyncRequestVote* pMsg);
int32_t syncNodeOnRequestVoteReplyCb(SSyncNode* ths, SyncRequestVoteReply* pMsg);
int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg);
......@@ -510,7 +541,7 @@ int32_t syncNodeOnSnapshotRspCb(SSyncNode* ths, SyncSnapshotRsp* pMsg);
// -----------------------------------------
typedef int32_t (*FpOnPingCb)(SSyncNode* ths, SyncPing* pMsg);
typedef int32_t (*FpOnPingReplyCb)(SSyncNode* ths, SyncPingReply* pMsg);
typedef int32_t (*FpOnClientRequestCb)(SSyncNode* ths, SyncClientRequest* pMsg);
typedef int32_t (*FpOnClientRequestCb)(SSyncNode* ths, SyncClientRequest* pMsg, SyncIndex* pRetIndex);
typedef int32_t (*FpOnRequestVoteCb)(SSyncNode* ths, SyncRequestVote* pMsg);
typedef int32_t (*FpOnRequestVoteReplyCb)(SSyncNode* ths, SyncRequestVoteReply* pMsg);
typedef int32_t (*FpOnAppendEntriesCb)(SSyncNode* ths, SyncAppendEntries* pMsg);
......
......@@ -34,10 +34,8 @@ extern int32_t tsRpcHeadSize;
typedef struct {
uint32_t clientIp;
uint16_t clientPort;
union {
char user[TSDB_USER_LEN];
int64_t applyIndex;
};
int64_t applyIndex;
char user[TSDB_USER_LEN];
} SRpcConnInfo;
typedef struct SRpcHandleInfo {
......@@ -71,7 +69,7 @@ typedef struct SRpcMsg {
} SRpcMsg;
typedef void (*RpcCfp)(void *parent, SRpcMsg *, SEpSet *rf);
typedef bool (*RpcRfp)(int32_t code);
typedef bool (*RpcRfp)(int32_t code, tmsg_t msgType);
typedef struct SRpcInit {
char localFqdn[TSDB_FQDN_LEN];
......
......@@ -195,7 +195,6 @@ void walCloseReadHandle(SWalReadHandle *);
int32_t walReadWithHandle(SWalReadHandle *pRead, int64_t ver);
// only for tq usage
// int32_t walReadWithHandle_s(SWalReadHandle *pRead, int64_t ver, SWalReadHead **ppHead);
void walSetReaderCapacity(SWalReadHandle *pRead, int32_t capacity);
int32_t walFetchHead(SWalReadHandle *pRead, int64_t ver, SWalHead *pHead);
int32_t walFetchBody(SWalReadHandle *pRead, SWalHead **ppHead);
......@@ -211,13 +210,8 @@ void walCloseRef(SWalRef *);
int32_t walRefVer(SWalRef *, int64_t ver);
int32_t walUnrefVer(SWal *);
// deprecated
#if 0
int32_t walRead(SWal *, SWalHead **, int64_t ver);
int32_t walReadWithFp(SWal *, FWalWrite writeFp, int64_t verStart, int32_t readNum);
#endif
// lifecycle check
bool walIsEmpty(SWal *);
int64_t walGetFirstVer(SWal *);
int64_t walGetSnapshotVer(SWal *);
int64_t walGetLastVer(SWal *);
......
......@@ -52,6 +52,7 @@ extern "C" {
#endif
#else
#include <malloc.h>
#include <time.h>
#ifndef TD_USE_WINSOCK
#include <winsock2.h>
......@@ -104,8 +105,6 @@ extern "C" {
#include "osTimezone.h"
#include "osEnv.h"
void osDefaultInit();
#ifdef __cplusplus
}
#endif
......
......@@ -25,11 +25,10 @@ extern "C" {
#define TSWAP(a, b) \
do { \
char *__tmp = taosMemoryMalloc(sizeof(a)); \
char *__tmp = alloca(sizeof(a)); \
memcpy(__tmp, &(a), sizeof(a)); \
memcpy(&(a), &(b), sizeof(a)); \
memcpy(&(b), __tmp, sizeof(a)); \
taosMemoryFree(__tmp); \
} while (0)
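The revised TSWAP above swaps through an alloca'd scratch buffer instead of a heap allocation, so the temporary lives on the stack for just that statement. Usage is unchanged; a trivial illustration with example values:

int32_t a = 1, b = 2;
TSWAP(a, b);   /* afterwards a == 2, b == 1 */

/* Works for any two lvalues of the same size (structs included), since the
   macro uses sizeof(a) for all three memcpy calls; mixing operand sizes is
   therefore unsafe. */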
#ifdef WINDOWS
......
......@@ -157,7 +157,10 @@ int32_t taosNonblockwrite(TdSocketPtr pSocket, char *ptr, int32_t nbytes);
int64_t taosCopyFds(TdSocketPtr pSrcSocket, TdSocketPtr pDestSocket, int64_t len);
void taosWinSocketInit();
int taosCreateSocketWithTimeOutOpt(uint32_t conn_timeout_sec);
/*
* set timeout(ms)
*/
int32_t taosCreateSocketWithTimeout(uint32_t timeout);
TdSocketPtr taosOpenUdpSocket(uint32_t localIp, uint16_t localPort);
TdSocketPtr taosOpenTcpClientSocket(uint32_t ip, uint16_t port, uint32_t localIp);
......
......@@ -29,6 +29,9 @@ extern "C" {
#define tcgetattr TCGETATTR_FUNC_TAOS_FORBID
#endif
#define TAOS_CONSOLE_PROMPT_HEADER "taos> "
#define TAOS_CONSOLE_PROMPT_CONTINUE " -> "
typedef struct TdCmd *TdCmdPtr;
TdCmdPtr taosOpenCmd(const char* cmd);
......
......@@ -38,7 +38,7 @@ extern "C" {
#define MILLISECOND_PER_SECOND (1000i64)
#else
#define MILLISECOND_PER_SECOND ((int64_t)1000L)
#define MILLISECOND_PER_SECOND ((int64_t)1000LL)
#endif
#define MILLISECOND_PER_MINUTE (MILLISECOND_PER_SECOND * 60)
......@@ -46,9 +46,9 @@ extern "C" {
#define MILLISECOND_PER_DAY (MILLISECOND_PER_HOUR * 24)
#define MILLISECOND_PER_WEEK (MILLISECOND_PER_DAY * 7)
#define NANOSECOND_PER_USEC (1000L)
#define NANOSECOND_PER_MSEC (1000000L)
#define NANOSECOND_PER_SEC (1000000000L)
#define NANOSECOND_PER_USEC (1000LL)
#define NANOSECOND_PER_MSEC (1000000LL)
#define NANOSECOND_PER_SEC (1000000000LL)
#define NANOSECOND_PER_MINUTE (NANOSECOND_PER_SEC * 60)
#define NANOSECOND_PER_HOUR (NANOSECOND_PER_MINUTE * 60)
#define NANOSECOND_PER_DAY (NANOSECOND_PER_HOUR * 24)
......@@ -65,21 +65,21 @@ int32_t taosGetTimestampSec();
static FORCE_INLINE int64_t taosGetTimestampMs() {
struct timeval systemTime;
taosGetTimeOfDay(&systemTime);
return (int64_t)systemTime.tv_sec * 1000L + (int64_t)systemTime.tv_usec / 1000;
return (int64_t)systemTime.tv_sec * 1000LL + (int64_t)systemTime.tv_usec / 1000;
}
//@return timestamp in microsecond
static FORCE_INLINE int64_t taosGetTimestampUs() {
struct timeval systemTime;
taosGetTimeOfDay(&systemTime);
return (int64_t)systemTime.tv_sec * 1000000L + (int64_t)systemTime.tv_usec;
return (int64_t)systemTime.tv_sec * 1000000LL + (int64_t)systemTime.tv_usec;
}
//@return timestamp in nanosecond
static FORCE_INLINE int64_t taosGetTimestampNs() {
struct timespec systemTime = {0};
taosClockGetTime(CLOCK_REALTIME, &systemTime);
return (int64_t)systemTime.tv_sec * 1000000000L + (int64_t)systemTime.tv_nsec;
return (int64_t)systemTime.tv_sec * 1000000000LL + (int64_t)systemTime.tv_nsec;
}
char * taosStrpTime(const char *buf, const char *fmt, struct tm *tm);
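The L-to-LL changes in this hunk matter on LLP64 platforms (for example Windows/MSVC), where long is only 32 bits: constants chained with * 60 * 60 can overflow in long before they are ever widened to int64_t. A small, hedged illustration of the hazard, with numbers chosen to mirror the nanosecond macros above rather than taken from this commit:

/* With a 32-bit long, the first product already exceeds LONG_MAX and the
   expression is undefined; the LL suffix keeps the arithmetic 64-bit throughout. */
int64_t risky = 1000000000L  * 60 * 60;   /* overflows on LLP64 targets */
int64_t safe  = 1000000000LL * 60 * 60;   /* 3600000000000, as intended */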
......
......@@ -71,6 +71,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE TAOS_DEF_ERROR_CODE(0, 0x0029)
#define TSDB_CODE_INVALID_TIMESTAMP TAOS_DEF_ERROR_CODE(0, 0x0030)
#define TSDB_CODE_MSG_DECODE_ERROR TAOS_DEF_ERROR_CODE(0, 0x0031)
#define TSDB_CODE_NO_AVAIL_DISK TAOS_DEF_ERROR_CODE(0, 0x0032)
#define TSDB_CODE_NOT_FOUND TAOS_DEF_ERROR_CODE(0, 0x0032)
#define TSDB_CODE_REF_NO_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0040)
......@@ -130,6 +131,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_TSC_STMT_CLAUSE_ERROR TAOS_DEF_ERROR_CODE(0, 0X0227)
#define TSDB_CODE_TSC_QUERY_KILLED TAOS_DEF_ERROR_CODE(0, 0X0228)
#define TSDB_CODE_TSC_NO_EXEC_NODE TAOS_DEF_ERROR_CODE(0, 0X0229)
#define TSDB_CODE_TSC_NOT_STABLE_ERROR TAOS_DEF_ERROR_CODE(0, 0X022a)
// mnode-common
#define TSDB_CODE_MND_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x0300)
......@@ -417,6 +419,10 @@ int32_t* taosGetErrno();
#define TSDB_CODE_SYN_NOT_LEADER TAOS_DEF_ERROR_CODE(0, 0x090C)
#define TSDB_CODE_SYN_ONE_REPLICA TAOS_DEF_ERROR_CODE(0, 0x090D)
#define TSDB_CODE_SYN_NOT_IN_NEW_CONFIG TAOS_DEF_ERROR_CODE(0, 0x090E)
#define TSDB_CODE_SYN_NEW_CONFIG_ERROR TAOS_DEF_ERROR_CODE(0, 0x090F)
#define TSDB_CODE_SYN_RECONFIG_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0910)
#define TSDB_CODE_SYN_PROPOSE_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0911)
#define TSDB_CODE_SYN_STANDBY_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0912)
#define TSDB_CODE_SYN_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x09FF)
// tq
......
......@@ -33,7 +33,8 @@ typedef enum {
CFG_STYPE_ENV_CMD,
CFG_STYPE_APOLLO_URL,
CFG_STYPE_ARG_LIST,
CFG_STYPE_TAOS_OPTIONS
CFG_STYPE_TAOS_OPTIONS,
CFG_STYPE_ALTER_CMD,
} ECfgSrcType;
typedef enum {
......@@ -104,6 +105,8 @@ int32_t cfgAddTimezone(SConfig *pCfg, const char *name, const char *defaultVal);
const char *cfgStypeStr(ECfgSrcType type);
const char *cfgDtypeStr(ECfgDataType type);
void cfgDumpItemValue(SConfigItem *pItem, char* buf, int32_t bufSize, int32_t* pLen);
void cfgDumpCfg(SConfig *pCfg, bool tsc, bool dump);
int32_t cfgGetApollUrl(const char **envCmd, const char *envFile, char* apolloUrl);
......
......@@ -49,12 +49,12 @@ extern const int32_t TYPE_BYTES[16];
#define TSDB_DATA_BOOL_NULL 0x02
#define TSDB_DATA_TINYINT_NULL 0x80
#define TSDB_DATA_SMALLINT_NULL 0x8000
#define TSDB_DATA_INT_NULL 0x80000000L
#define TSDB_DATA_BIGINT_NULL 0x8000000000000000L
#define TSDB_DATA_INT_NULL 0x80000000LL
#define TSDB_DATA_BIGINT_NULL 0x8000000000000000LL
#define TSDB_DATA_TIMESTAMP_NULL TSDB_DATA_BIGINT_NULL
#define TSDB_DATA_FLOAT_NULL 0x7FF00000 // it is an NAN
#define TSDB_DATA_DOUBLE_NULL 0x7FFFFF0000000000L // an NAN
#define TSDB_DATA_DOUBLE_NULL 0x7FFFFF0000000000LL // an NAN
#define TSDB_DATA_NCHAR_NULL 0xFFFFFFFF
#define TSDB_DATA_BINARY_NULL 0xFF
......@@ -108,8 +108,8 @@ extern const int32_t TYPE_BYTES[16];
#define TSDB_INS_USER_STABLES_DBNAME_COLID 2
#define TSDB_TICK_PER_SECOND(precision) \
((int64_t)((precision) == TSDB_TIME_PRECISION_MILLI ? 1e3L \
: ((precision) == TSDB_TIME_PRECISION_MICRO ? 1e6L : 1e9L)))
((int64_t)((precision) == TSDB_TIME_PRECISION_MILLI ? 1000LL \
: ((precision) == TSDB_TIME_PRECISION_MICRO ? 1000000LL : 1000000000LL)))
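TSDB_TICK_PER_SECOND now expands to integer LL literals instead of the long-double forms 1e3L/1e6L/1e9L, keeping the whole expression in integer arithmetic. A quick usage sketch; TSDB_TIME_PRECISION_NANO is assumed to be the third precision constant alongside the two named in the macro:

/* ticks in a 5-second window, per database precision */
int64_t msTicks = 5 * TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MILLI);   /* 5000 */
int64_t nsTicks = 5 * TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_NANO);    /* 5000000000 */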
#define T_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
#define T_APPEND_MEMBER(dst, ptr, type, member) \
......@@ -134,7 +134,7 @@ typedef enum EOperatorType {
OP_TYPE_MINUS,
OP_TYPE_ASSIGN,
// bit operator
// bitwise operator
OP_TYPE_BIT_AND,
OP_TYPE_BIT_OR,
......@@ -443,8 +443,8 @@ enum {
#define VNODE_HANDLE -3
#define BNODE_HANDLE -4
#define TSDB_CONFIG_OPTION_LEN 16
#define TSDB_CONIIG_VALUE_LEN 48
#define TSDB_CONFIG_OPTION_LEN 32
#define TSDB_CONFIG_VALUE_LEN 64
#define TSDB_CONFIG_NUMBER 8
#define QUERY_ID_SIZE 20
......
......@@ -28,7 +28,7 @@ typedef struct SArray* SIDList;
typedef struct SPageInfo SPageInfo;
typedef struct SDiskbasedBuf SDiskbasedBuf;
#define DEFAULT_INTERN_BUF_PAGE_SIZE (1024L) // in bytes
#define DEFAULT_INTERN_BUF_PAGE_SIZE (1024LL) // in bytes
typedef struct SFilePage {
int32_t num;
......
......@@ -45,9 +45,11 @@ typedef struct STraceId {
#define TRACE_GET_MSGID(traceId) (traceId)->msgId
#define TRACE_TO_STR(traceId, buf) \
do { \
sprintf(buf, "0x%" PRIx64 ":0x%" PRIx64 "", traceId->rootId, traceId->msgId); \
#define TRACE_TO_STR(traceId, buf) \
do { \
int64_t rootId = (traceId) != NULL ? (traceId)->rootId : 0; \
int64_t msgId = (traceId) != NULL ? (traceId)->msgId : 0; \
sprintf(buf, "0x%" PRIx64 ":0x%" PRIx64 "", rootId, msgId); \
} while (0)
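The reworked TRACE_TO_STR above tolerates a NULL traceId by printing zeros instead of dereferencing it. A minimal usage sketch; the buffer size is an assumption, since the macro itself does not bound the sprintf:

char buf[64];               /* comfortably holds "0x<16 hex>:0x<16 hex>" plus NUL */
STraceId* trace = NULL;     /* e.g. a message that carries no trace context */
TRACE_TO_STR(trace, buf);   /* buf now reads "0x0:0x0" instead of crashing */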
#ifdef __cplusplus
......
......@@ -32,7 +32,7 @@ extern "C" {
#define GET_UINT64_VAL(x) (*(uint64_t *)(x))
static FORCE_INLINE float taos_align_get_float(const char *pBuf) {
#if __STDC_VERSION__ >= 201112L
#if __STDC_VERSION__ >= 201112LL
static_assert(sizeof(float) == sizeof(uint32_t), "sizeof(float) must equal to sizeof(uint32_t)");
#else
assert(sizeof(float) == sizeof(uint32_t));
......@@ -43,7 +43,7 @@ static FORCE_INLINE float taos_align_get_float(const char *pBuf) {
}
static FORCE_INLINE double taos_align_get_double(const char *pBuf) {
#if __STDC_VERSION__ >= 201112L
#if __STDC_VERSION__ >= 201112LL
static_assert(sizeof(double) == sizeof(uint64_t), "sizeof(double) must equal to sizeof(uint64_t)");
#else
assert(sizeof(double) == sizeof(uint64_t));
......
......@@ -196,7 +196,6 @@ function install_bin() {
${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
${csudo}rm -f ${bin_link_dir}/tarbitrator || :
${csudo}rm -f ${bin_link_dir}/set_core || :
${csudo}rm -f ${bin_link_dir}/run_${serverName}_and_${adapterName}.sh || :
${csudo}rm -f ${bin_link_dir}/TDinsight.sh || :
${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/*
......@@ -211,7 +210,6 @@ function install_bin() {
[ -x ${install_main_dir}/bin/TDinsight.sh ] && ${csudo}ln -s ${install_main_dir}/bin/TDinsight.sh ${bin_link_dir}/TDinsight.sh || :
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
[ -x ${install_main_dir}/bin/run_${serverName}_and_${adapterName}.sh ] && ${csudo}ln -s ${install_main_dir}/bin/run_${serverName}_and_${adapterName}.sh ${bin_link_dir}/run_${serverName}_and_${adapterName}.sh || :
[ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo}ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
if [ "$verMode" == "cluster" ]; then
......
......@@ -176,7 +176,6 @@ function install_bin() {
if [ "$osType" != "Darwin" ]; then
${csudo}rm -f ${bin_link_dir}/perfMonitor || :
${csudo}rm -f ${bin_link_dir}/set_core || :
${csudo}rm -f ${bin_link_dir}/run_taosd_and_taosadapter.sh || :
${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
${csudo}cp -r ${binary_dir}/build/bin/${clientName} ${install_main_dir}/bin || :
......@@ -191,7 +190,6 @@ function install_bin() {
${csudo}cp -r ${script_dir}/taosd-dump-cfg.gdb ${install_main_dir}/bin || :
${csudo}cp -r ${script_dir}/remove.sh ${install_main_dir}/bin || :
${csudo}cp -r ${script_dir}/set_core.sh ${install_main_dir}/bin || :
${csudo}cp -r ${script_dir}/run_taosd_and_taosadapter.sh ${install_main_dir}/bin || :
${csudo}cp -r ${script_dir}/startPre.sh ${install_main_dir}/bin || :
${csudo}chmod 0555 ${install_main_dir}/bin/*
......@@ -204,7 +202,6 @@ function install_bin() {
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
[ -x ${install_main_dir}/bin/perfMonitor ] && ${csudo}ln -s ${install_main_dir}/bin/perfMonitor ${bin_link_dir}/perfMonitor || :
[ -x ${install_main_dir}/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
[ -x ${install_main_dir}/run_taosd_and_taosadapter.sh ] && ${csudo}ln -s ${install_main_dir}/bin/run_taosd_and_taosadapter.sh ${bin_link_dir}/run_taosd_and_taosadapter.sh || :
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || :
else
......
......@@ -88,7 +88,6 @@ else
${build_dir}/bin/tarbitrator\
${script_dir}/remove.sh \
${script_dir}/set_core.sh \
${script_dir}/run_taosd_and_taosadapter.sh \
${script_dir}/startPre.sh \
${script_dir}/taosd-dump-cfg.gdb"
fi
......@@ -152,7 +151,6 @@ if [ $adapterName != "taosadapter" ]; then
sed -i "s/taosadapter/${adapterName}/g" ${install_dir}/cfg/$adapterName.service
mv ${install_dir}/bin/taosadapter ${install_dir}/bin/${adapterName}
mv ${install_dir}/bin/run_taosd_and_taosadapter.sh ${install_dir}/bin/run_${serverName}_and_${adapterName}.sh
mv ${install_dir}/bin/taosd-dump-cfg.gdb ${install_dir}/bin/${serverName}-dump-cfg.gdb
fi
......
#!/bin/bash
[[ -x /usr/bin/taosadapter ]] && /usr/bin/taosadapter &
taosd
......@@ -20,9 +20,9 @@
extern "C" {
#endif
#include "catalog.h"
#include "parser.h"
#include "planner.h"
#include "catalog.h"
#include "query.h"
#include "taos.h"
#include "tcommon.h"
......@@ -51,10 +51,17 @@ extern "C" {
enum {
RES_TYPE__QUERY = 1,
RES_TYPE__TMQ,
RES_TYPE__TMQ_META,
};
#define TD_RES_QUERY(res) (*(int8_t*)res == RES_TYPE__QUERY)
#define TD_RES_TMQ(res) (*(int8_t*)res == RES_TYPE__TMQ)
#define SHOW_VARIABLES_RESULT_COLS 2
#define SHOW_VARIABLES_RESULT_FIELD1_LEN (TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE)
#define SHOW_VARIABLES_RESULT_FIELD2_LEN (TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE)
#define TD_RES_QUERY(res) (*(int8_t*)res == RES_TYPE__QUERY)
#define TD_RES_TMQ(res) (*(int8_t*)res == RES_TYPE__TMQ)
#define TD_RES_TMQ_META(res) (*(int8_t*)res == RES_TYPE__TMQ_META)
typedef struct SAppInstInfo SAppInstInfo;
......@@ -66,9 +73,9 @@ typedef struct {
int64_t reportBytes; // not implemented
int64_t startTime;
// ctl
SRWLatch lock; // lock is used in serialization
SRWLatch lock; // lock is used in serialization
SAppInstInfo* pAppInstInfo;
SHashObj* activeInfo; // hash<SClientHbKey, SClientHbReq>
SHashObj* activeInfo; // hash<SClientHbKey, SClientHbReq>
} SAppHbMgr;
typedef int32_t (*FHbRspHandle)(SAppHbMgr* pAppHbMgr, SClientHbRsp* pRsp);
......@@ -76,13 +83,13 @@ typedef int32_t (*FHbRspHandle)(SAppHbMgr* pAppHbMgr, SClientHbRsp* pRsp);
typedef int32_t (*FHbReqHandle)(SClientHbKey* connKey, void* param, SClientHbReq* req);
typedef struct {
int8_t inited;
int64_t appId;
int8_t inited;
int64_t appId;
// ctl
int8_t threadStop;
TdThread thread;
TdThreadMutex lock; // used when app init and cleanup
SHashObj *appSummary;
TdThreadMutex lock; // used when app init and cleanup
SHashObj* appSummary;
SArray* appHbMgrs; // SArray<SAppHbMgr*> one for each cluster
FHbReqHandle reqHandle[CONN_TYPE__MAX];
FHbRspHandle rspHandle[CONN_TYPE__MAX];
......@@ -102,6 +109,8 @@ typedef struct SHeartBeatInfo {
struct SAppInstInfo {
int64_t numOfConns;
SCorEpSet mgmtEp;
int32_t totalDnodes;
int32_t onlineDnodes;
TdThreadMutex qnodeMutex;
SArray* pQnodeList;
SAppClusterSummary summary;
......@@ -125,11 +134,12 @@ typedef struct STscObj {
char user[TSDB_USER_LEN];
char pass[TSDB_PASSWORD_LEN];
char db[TSDB_DB_FNAME_LEN];
char ver[128];
char sVer[TSDB_VERSION_LEN];
char sDetailVer[128];
int8_t connType;
int32_t acctId;
uint32_t connId;
TAOS *id; // ref ID returned by taosAddRef
TAOS* id; // ref ID returned by taosAddRef
TdThreadMutex mutex; // used to protect the operation on db
int32_t numOfReqs; // number of sqlObj bound to this connection
SAppInstInfo* pAppInfo;
......@@ -188,6 +198,14 @@ typedef struct {
SReqResultInfo resInfo;
} SMqRspObj;
typedef struct {
int8_t resType;
char topic[TSDB_TOPIC_FNAME_LEN];
char db[TSDB_DB_FNAME_LEN];
int32_t vgId;
SMqMetaRsp metaRsp;
} SMqMetaRspObj;
typedef struct SRequestObj {
int8_t resType; // query or tmq
uint64_t requestId;
......@@ -205,10 +223,11 @@ typedef struct SRequestObj {
SQueryExecMetric metric;
SRequestSendRecvBody body;
bool stableQuery;
bool validateOnly;
bool killed;
uint32_t prevCode; //previous error code: todo refactor, add update flag for catalog
uint32_t retry;
bool killed;
uint32_t prevCode; // previous error code: todo refactor, add update flag for catalog
uint32_t retry;
} SRequestObj;
typedef struct SSyncQueryParam {
......@@ -216,16 +235,21 @@ typedef struct SSyncQueryParam {
SRequestObj* pRequest;
} SSyncQueryParam;
void* doAsyncFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4);
void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4);
void* doAsyncFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4);
void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4);
void doSetOneRowPtr(SReqResultInfo* pResultInfo);
void setResPrecision(SReqResultInfo* pResInfo, int32_t precision);
int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4,
bool freeAfterUse);
void setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t numOfCols);
void doFreeReqResultInfo(SReqResultInfo* pResInfo);
int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName, SArray** pReq);
void syncCatalogFn(SMetaData* pResult, void* param, int32_t code);
void doSetOneRowPtr(SReqResultInfo* pResultInfo);
void setResPrecision(SReqResultInfo* pResInfo, int32_t precision);
int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4,
bool freeAfterUse);
void setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t numOfCols);
void doFreeReqResultInfo(SReqResultInfo* pResInfo);
SRequestObj* execQuery(STscObj* pTscObj, const char* sql, int sqlLen);
SRequestObj* execQuery(STscObj* pTscObj, const char* sql, int sqlLen, bool validateOnly);
TAOS_RES *taosQueryImpl(TAOS *taos, const char *sql, bool validateOnly);
void taosAsyncQueryImpl(TAOS *taos, const char *sql, __taos_async_fn_t fp, void *param, bool validateOnly);
static FORCE_INLINE SReqResultInfo* tmqGetCurResInfo(TAOS_RES* res) {
SMqRspObj* msg = (SMqRspObj*)res;
......@@ -289,9 +313,9 @@ bool persistConnForSpecificMsg(void* parenct, tmsg_t msgType);
void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet);
STscObj* taos_connect_internal(const char* ip, const char* user, const char* pass, const char* auth, const char* db,
uint16_t port, int connType);
uint16_t port, int connType);
SRequestObj* launchQuery(STscObj* pTscObj, const char* sql, int sqlLen);
SRequestObj* launchQuery(STscObj* pTscObj, const char* sql, int sqlLen, bool validateOnly);
int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtCallback* pStmtCb);
......@@ -299,7 +323,7 @@ int32_t getPlan(SRequestObj* pRequest, SQuery* pQuery, SQueryPlan** pPlan, SArra
int32_t buildRequest(STscObj* pTscObj, const char* sql, int sqlLen, SRequestObj** pRequest);
void taos_close_internal(void *taos);
void taos_close_internal(void* taos);
// --- heartbeat
// global, called by mgmt
......@@ -320,12 +344,12 @@ void hbMgrInitMqHbRspHandle();
SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, bool keepQuery, void** res);
int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList);
void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData *pResultMeta);
void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultMeta);
int32_t refreshMeta(STscObj* pTscObj, SRequestObj* pRequest);
int32_t updateQnodeList(SAppInstInfo* pInfo, SArray* pNodeList);
void doAsyncQuery(SRequestObj* pRequest, bool forceUpdateMeta);
int32_t removeMeta(STscObj* pTscObj, SArray* tbList);// todo move to clientImpl.c and become a static function
int32_t handleAlterTbExecRes(void* res, struct SCatalog* pCatalog);// todo move to xxx
int32_t removeMeta(STscObj* pTscObj, SArray* tbList); // todo move to clientImpl.c and become a static function
int32_t handleAlterTbExecRes(void* res, struct SCatalog* pCatalog); // todo move to xxx
bool qnodeRequired(SRequestObj* pRequest);
#ifdef __cplusplus
......
......@@ -13,11 +13,11 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "os.h"
#include "catalog.h"
#include "functionMgt.h"
#include "clientInt.h"
#include "clientLog.h"
#include "functionMgt.h"
#include "os.h"
#include "query.h"
#include "scheduler.h"
#include "tcache.h"
......@@ -38,7 +38,7 @@ static TdThreadOnce tscinit = PTHREAD_ONCE_INIT;
volatile int32_t tscInitRes = 0;
static void registerRequest(SRequestObj *pRequest) {
STscObj *pTscObj = acquireTscObj(*(int64_t*)pRequest->pTscObj->id);
STscObj *pTscObj = acquireTscObj(*(int64_t *)pRequest->pTscObj->id);
assert(pTscObj != NULL);
......@@ -54,14 +54,14 @@ static void registerRequest(SRequestObj *pRequest) {
int32_t currentInst = atomic_add_fetch_64((int64_t *)&pSummary->currentRequests, 1);
tscDebug("0x%" PRIx64 " new Request from connObj:0x%" PRIx64
", current:%d, app current:%d, total:%d, reqId:0x%" PRIx64,
pRequest->self, *(int64_t*)pRequest->pTscObj->id, num, currentInst, total, pRequest->requestId);
pRequest->self, *(int64_t *)pRequest->pTscObj->id, num, currentInst, total, pRequest->requestId);
}
}
static void deregisterRequest(SRequestObj *pRequest) {
assert(pRequest != NULL);
STscObj *pTscObj = pRequest->pTscObj;
STscObj * pTscObj = pRequest->pTscObj;
SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary;
int32_t currentInst = atomic_sub_fetch_64((int64_t *)&pActivity->currentRequests, 1);
......@@ -70,8 +70,8 @@ static void deregisterRequest(SRequestObj *pRequest) {
int64_t duration = taosGetTimestampUs() - pRequest->metric.start;
tscDebug("0x%" PRIx64 " free Request from connObj: 0x%" PRIx64 ", reqId:0x%" PRIx64 " elapsed:%" PRIu64
" ms, current:%d, app current:%d",
pRequest->self, *(int64_t*)pTscObj->id, pRequest->requestId, duration / 1000, num, currentInst);
releaseTscObj(*(int64_t*)pTscObj->id);
pRequest->self, *(int64_t *)pTscObj->id, pRequest->requestId, duration / 1000, num, currentInst);
releaseTscObj(*(int64_t *)pTscObj->id);
}
// todo close the transporter properly
......@@ -80,12 +80,16 @@ void closeTransporter(STscObj *pTscObj) {
return;
}
tscDebug("free transporter:%p in connObj: 0x%" PRIx64, pTscObj->pAppInfo->pTransporter, *(int64_t*)pTscObj->id);
tscDebug("free transporter:%p in connObj: 0x%" PRIx64, pTscObj->pAppInfo->pTransporter, *(int64_t *)pTscObj->id);
rpcClose(pTscObj->pAppInfo->pTransporter);
}
static bool clientRpcRfp(int32_t code) {
if (code == TSDB_CODE_RPC_REDIRECT) {
static bool clientRpcRfp(int32_t code, tmsg_t msgType) {
if (code == TSDB_CODE_RPC_REDIRECT || code == TSDB_CODE_RPC_NETWORK_UNAVAIL || code == TSDB_CODE_NODE_NOT_DEPLOYED ||
code == TSDB_CODE_SYN_NOT_LEADER || code == TSDB_CODE_APP_NOT_READY) {
if (msgType == TDMT_VND_QUERY || msgType == TDMT_VND_FETCH) {
return false;
}
return true;
} else {
return false;
......@@ -128,16 +132,17 @@ void closeAllRequests(SHashObj *pRequests) {
void destroyTscObj(void *pObj) {
STscObj *pTscObj = pObj;
SClientHbKey connKey = {.tscRid = *(int64_t*)pTscObj->id, .connType = pTscObj->connType};
SClientHbKey connKey = {.tscRid = *(int64_t *)pTscObj->id, .connType = pTscObj->connType};
hbDeregisterConn(pTscObj->pAppInfo->pAppHbMgr, connKey);
int64_t connNum = atomic_sub_fetch_64(&pTscObj->pAppInfo->numOfConns, 1);
closeAllRequests(pTscObj->pRequests);
schedulerStopQueryHb(pTscObj->pAppInfo->pTransporter);
if (0 == connNum) {
// TODO
//closeTransporter(pTscObj);
// TODO
// closeTransporter(pTscObj);
}
tscDebug("connObj 0x%" PRIx64 " destroyed, totalConn:%" PRId64, *(int64_t*)pTscObj->id, pTscObj->pAppInfo->numOfConns);
tscDebug("connObj 0x%" PRIx64 " destroyed, totalConn:%" PRId64, *(int64_t *)pTscObj->id,
pTscObj->pAppInfo->numOfConns);
taosThreadMutexDestroy(&pTscObj->mutex);
taosMemoryFreeClear(pTscObj);
}
......@@ -167,10 +172,10 @@ void *createTscObj(const char *user, const char *auth, const char *db, int32_t c
taosThreadMutexInit(&pObj->mutex, NULL);
pObj->id = taosMemoryMalloc(sizeof(int64_t));
*(int64_t*)pObj->id = taosAddRef(clientConnRefPool, pObj);
*(int64_t *)pObj->id = taosAddRef(clientConnRefPool, pObj);
pObj->schemalessType = 1;
tscDebug("connObj created, 0x%" PRIx64, *(int64_t*)pObj->id);
tscDebug("connObj created, 0x%" PRIx64, *(int64_t *)pObj->id);
return pObj;
}
......@@ -325,7 +330,7 @@ int taos_options_imp(TSDB_OPTION option, const char *str) {
return 0;
}
SConfig *pCfg = taosGetCfg();
SConfig * pCfg = taosGetCfg();
SConfigItem *pItem = NULL;
switch (option) {
......
......@@ -161,6 +161,9 @@ static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) {
updateEpSet_s(&pTscObj->pAppInfo->mgmtEp, &pRsp->query->epSet);
}
pTscObj->pAppInfo->totalDnodes = pRsp->query->totalDnodes;
pTscObj->pAppInfo->onlineDnodes = pRsp->query->onlineDnodes;
pTscObj->connId = pRsp->query->connId;
if (pRsp->query->killRid) {
......
(2 file diffs collapsed.)
......@@ -2389,17 +2389,19 @@ static int32_t isSchemalessDb(STscObj *taos, SRequestObj *request) {
static void smlInsertCallback(void *param, void *res, int32_t code) {
SRequestObj *pRequest = (SRequestObj *)res;
SSmlHandle *info = (SSmlHandle *)param;
int32_t rows = taos_affected_rows(pRequest);
uDebug("SML:0x%" PRIx64 " result. code:%d, msg:%s", info->id, pRequest->code, pRequest->msgBuf);
// lock
taosThreadSpinLock(&info->params->lock);
info->params->request->body.resInfo.numOfRows += rows;
if (code != TSDB_CODE_SUCCESS) {
taosThreadSpinLock(&info->params->lock);
info->params->request->code = code;
taosThreadSpinUnlock(&info->params->lock);
}
taosThreadSpinUnlock(&info->params->lock);
// unlock
printf("SML:0x%" PRIx64 " insert finished, code: %d, total: %d\n", info->id, code, info->affectedRows);
uDebug("SML:0x%" PRIx64 " insert finished, code: %d, rows: %d, total: %d", info->id, code, rows, info->affectedRows);
Params *pParam = info->params;
bool isLast = info->isLast;
info->cost.endTime = taosGetTimestampUs();
......
(2 file diffs collapsed.)
......@@ -1272,10 +1272,10 @@ TEST(testCase, sml_dup_time_Test) {
const char *sql[] = {
//"test_ms,t0=t c0=f 1626006833641",
"ubzlsr,id=qmtcvgd,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" c0=false,c1=1i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"xcxvwjvf\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
"ubzlsr,id=qmtcvgd,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" c0=T,c1=2i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"fixrzcuq\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
"ubzlsr,id=qmtcvgd,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" c0=t,c1=3i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"iupzdqub\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
"ubzlsr,id=qmtcvgd,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" c0=t,c1=4i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"yvvtzzof\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
"ubzlsr,id=qmtcvgd,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" c0=t,c1=5i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"vbxpilkj\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000"
"ubzlsr,id=qmtcvgd,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" c0=T,c1=2i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"fixrzcuq\",c8=L\"ncharColValue\",c9=7u64 1626006834639000000",
"ubzlsr,id=qmtcvgd,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" c0=t,c1=3i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"iupzdqub\",c8=L\"ncharColValue\",c9=7u64 1626006835639000000",
"ubzlsr,id=qmtcvgd,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" c0=t,c1=4i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"yvvtzzof\",c8=L\"ncharColValue\",c9=7u64 1626006836639000000",
"ubzlsr,id=qmtcvgd,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" c0=t,c1=5i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"vbxpilkj\",c8=L\"ncharColValue\",c9=7u64 1626006837639000000"
};
pRes = taos_query(taos, "use dup_time");
taos_free_result(pRes);
......
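The updated test data above gives each line-protocol row a distinct timestamp (stepping by one second in nanoseconds) instead of reusing 1626006833639000000 for every row, so the rows of the duplicate-time test no longer collide on the primary timestamp. A hedged sketch of generating such rows; the base value comes from the test, while the loop, tag set, and buffer size are illustrative only:

/* Hedged sketch: emit schemaless line-protocol rows with distinct
 * nanosecond timestamps, as in the updated test strings above. */
#include <stdio.h>
#include <stdint.h>

int main(void) {
  const int64_t baseTsNs = 1626006833639000000LL;
  char line[256];
  for (int i = 0; i < 5; ++i) {
    /* Each row advances the timestamp by 1 s (1e9 ns), so no two rows
     * of the same subtable share the primary timestamp. */
    snprintf(line, sizeof(line),
             "ubzlsr,id=qmtcvgd,t0=t c0=t,c1=%di8 %lld",
             i + 1, (long long)(baseTsNs + (int64_t)i * 1000000000LL));
    printf("%s\n", line);
  }
  return 0;
}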
This diff has been collapsed.