Commit 3dbaf255 authored by B Benguang Zhao

Merge branch '2.6' into fix/TS-1601-V26

# [Choice] Ubuntu version (use jammy or bionic on local arm64/Apple Silicon): jammy, focal, bionic
ARG VARIANT="bullseye"
FROM mcr.microsoft.com/vscode/devcontainers/base:0-${VARIANT}
# FROM ubuntu:20.04
# Options for setup script
# ARG INSTALL_ZSH="true"
# ARG UPGRADE_PACKAGES="true"
ADD sources.list /etc/apt/
RUN apt-get update && apt-get -y install tree vim tmux python3-pip gcc cmake build-essential git gdb
\ No newline at end of file
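As a quick sanity check, the image above can also be built and run standalone; the tag name is illustrative, and `VARIANT` must match a tag published for `mcr.microsoft.com/vscode/devcontainers/base` (e.g. the `ubuntu-20.04` value used by devcontainer.json below).

```bash
# Build the dev image directly (tag name is illustrative).
docker build --build-arg VARIANT=ubuntu-20.04 -t tdengine-devcontainer .
# Run it with ptrace enabled for gdb, mirroring the devcontainer runArgs below.
docker run -it --cap-add=SYS_PTRACE --security-opt seccomp=unconfined tdengine-devcontainer bash
```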
{
"name": "Ubuntu",
"build": {
"dockerfile": "Dockerfile",
// Update 'VARIANT' to pick an Ubuntu version: jammy / ubuntu-22.04, focal / ubuntu-20.04, bionic / ubuntu-18.04
// Use ubuntu-22.04 or ubuntu-18.04 on local arm64/Apple Silicon.
"args": { "VARIANT": "ubuntu-20.04" }
},
"runArgs": [
"--cap-add=SYS_PTRACE",
"--security-opt",
"seccomp=unconfined"
],
// Use 'forwardPorts' to make a list of ports inside the container available locally.
// "forwardPorts": [],
// Use 'postCreateCommand' to run commands after the container is created.
// "postCreateCommand": "uname -a",
// Comment out to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root.
"remoteUser": "root",
"extensions": [
"ms-vscode.cpptools",
"ms-vscode.cmake-tools",
"austin.code-gnu-global",
"visualstudioexptteam.vscodeintel",
"eamodio.gitlens",
"matepek.vscode-catch2-test-adapter",
"spmeesseman.vscode-taskexplorer",
"cschlosser.doxdocgen",
"urosvujosevic.explorer-manager"
]
}
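For those who prefer a terminal to the VS Code UI, the same configuration can be driven by the Dev Containers CLI; the CLI is an external tool and an assumption here, not part of this repository.

```bash
# Assumes the CLI is installed: npm install -g @devcontainers/cli
devcontainer up --workspace-folder .
devcontainer exec --workspace-folder . gcc --version
```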
deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal main restricted
deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-updates main restricted
deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal universe
deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-updates universe
deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal multiverse
deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-updates multiverse
deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-backports main restricted universe multiverse
deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-security main restricted
deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-security universe
deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-security multiverse
\ No newline at end of file
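This file swaps the default focal package sources for the TUNA mirror. Once the image is built, a quick check inside the container confirms apt actually resolves against the mirror:

```bash
# Inside the container: the fetched indexes should come from the TUNA mirror.
apt-get update 2>&1 | grep -m1 tuna.tsinghua.edu.cn
```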
......@@ -5,6 +5,8 @@ build/
cmake-build-debug/
cmake-build-release/
cscope.out
cscope.files
tags
.DS_Store
debug/
release/
......
......@@ -7,7 +7,8 @@ def sync_source() {
sh '''
hostname
date
'''
env
'''
sh '''
cd ${WKC}
[ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md"
......@@ -57,6 +58,7 @@ def sync_source() {
[ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md"
git pull >/dev/null
git clean -dfx
git log -5
'''
script {
if (env.CHANGE_TARGET == 'master') {
......@@ -90,6 +92,7 @@ def sync_source() {
cd ${WK}
git pull >/dev/null
git clean -dfx
git log -5
'''
script {
if (env.CHANGE_URL =~ /\/TDengine\//) {
......@@ -98,16 +101,13 @@ def sync_source() {
cd ${WKC}
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
if [ ! -d src/connector/python/.github ]; then
rm -rf src/connector/python/* || :
rm -rf src/connector/python/.* || :
git clone --depth 1 https://github.com/taosdata/taos-connector-python src/connector/python || echo "failed to clone python connector"
else
cd src/connector/python || echo "src/connector/python not exist"
git pull || :
cd ${WKC}
fi
git log -5
'''
sh '''
cd ${WKC}
rm -rf src/connector/python
mkdir -p src/connector/python
git clone --depth 1 https://github.com/taosdata/taos-connector-python src/connector/python || echo "failed to clone python connector"
'''
} else if (env.CHANGE_URL =~ /\/TDinternal\//) {
sh '''
......@@ -115,16 +115,13 @@ def sync_source() {
cd ${WK}
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
if [ ! -d community/src/connector/python/.github ]; then
rm -rf community/src/connector/python/* || :
rm -rf community/src/connector/python/.* || :
git clone --depth 1 https://github.com/taosdata/taos-connector-python community/src/connector/python || echo "failed to clone python connector"
else
cd community/src/connector/python || echo "community/src/connector/python not exist"
git pull || :
cd ${WK}
fi
git log -5
'''
sh '''
cd ${WKC}
rm -rf src/connector/python
mkdir -p src/connector/python
git clone --depth 1 https://github.com/taosdata/taos-connector-python src/connector/python || echo "failed to clone python connector"
'''
} else {
sh '''
......@@ -136,16 +133,6 @@ def sync_source() {
cd ${WKC}
git submodule update --init --recursive
'''
sh '''
cd ${WKC}
git branch
git log -5
'''
sh '''
cd ${WK}
git branch
git log -5
'''
}
def pre_test() {
sync_source()
......@@ -157,6 +144,7 @@ def pre_test() {
go env -w GO111MODULE=on
cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true > /dev/null
make -j8 >/dev/null
make install
'''
return 1
}
......@@ -173,8 +161,155 @@ def pre_test_mac() {
'''
return 1
}
def pre_test_win(){
bat '''
hostname
ipconfig
set
date /t
time /t
taskkill /f /t /im python.exe
taskkill /f /t /im bash.exe
taskkill /f /t /im taosd.exe
rd /s /Q %WIN_INTERNAL_ROOT%\\debug || echo "no debug folder"
echo "clean environment done"
exit 0
'''
bat '''
cd %WIN_INTERNAL_ROOT%
git reset --hard
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
git reset --hard
'''
script {
if (env.CHANGE_TARGET == 'master') {
bat '''
cd %WIN_INTERNAL_ROOT%
git checkout master
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
git checkout master
'''
} else if (env.CHANGE_TARGET == '2.0') {
bat '''
cd %WIN_INTERNAL_ROOT%
git checkout 2.0
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
git checkout 2.0
'''
} else if (env.CHANGE_TARGET == '2.4') {
bat '''
cd %WIN_INTERNAL_ROOT%
git checkout 2.4
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
git checkout 2.4
'''
} else if (env.CHANGE_TARGET == '2.6') {
bat '''
cd %WIN_INTERNAL_ROOT%
git checkout 2.6
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
git checkout 2.6
'''
} else {
bat '''
cd %WIN_INTERNAL_ROOT%
git checkout develop
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
git checkout develop
'''
}
}
bat '''
cd %WIN_INTERNAL_ROOT%
git pull
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
git remote prune origin
git pull
'''
bat '''
cd %WIN_INTERNAL_ROOT%
git branch
git log -5
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
git branch
git log -5
'''
script {
if (env.CHANGE_URL =~ /\/TDengine\//) {
bat '''
echo "match /TDengine/ repository"
cd %WIN_COMMUNITY_ROOT%
git fetch origin +refs/pull/%CHANGE_ID%/merge
git checkout -qf FETCH_HEAD
git log -5
'''
} else if (env.CHANGE_URL =~ /\/TDinternal\//) {
bat '''
echo "match /TDinternal/ repository"
cd %WIN_INTERNAL_ROOT%
git fetch origin +refs/pull/%CHANGE_ID%/merge
git checkout -qf FETCH_HEAD
git log -5
'''
} else {
bat '''
echo "unmatched reposiotry %CHANGE_URL%"
'''
}
}
bat '''
cd %WIN_COMMUNITY_ROOT%
git submodule update --init --recursive
'''
/*bat '''
cd %WIN_CONNECTOR_ROOT%
git branch
git reset --hard
git pull
'''
bat '''
cd %WIN_CONNECTOR_ROOT%
git log -5
'''*/
}
def pre_test_build_win() {
bat '''
echo "building ..."
time /t
cd %WIN_INTERNAL_ROOT%
mkdir debug
cd debug
time /t
call "C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\VC\\Auxiliary\\Build\\vcvarsall.bat" x64
set CL=/MP8
echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> cmake"
time /t
cmake .. -G "NMake Makefiles JOM" || exit 7
echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> jom -j 6"
time /t
jom -j 6 || exit 8
time /t
'''
return 1
}
pipeline {
agent {label " dispatcher "}
agent none
options { skipDefaultCheckout() }
environment{
WK = '/var/data/jenkins/workspace/TDinternal'
......@@ -182,18 +317,7 @@ pipeline {
LOGDIR = '/var/data/jenkins/workspace/log'
}
stages {
stage ('pre_build') {
steps {
sh '''
date
pwd
env
hostname
'''
}
}
stage ('Parallel build stage') {
//only build pr
stage('run test') {
options { skipDefaultCheckout() }
when {
allOf {
......@@ -202,103 +326,104 @@ pipeline {
}
}
parallel {
stage ('dispatcher sync source') {
steps {
timeout(time: 20, unit: 'MINUTES') {
sync_source()
script {
sh '''
echo "dispatcher ready"
date
'''
}
}
}
}
stage ('build worker01') {
agent {label " worker01 "}
stage ('build arm64') {
agent {label " worker07_arm64 || worker09_arm64 "}
steps {
timeout(time: 20, unit: 'MINUTES') {
pre_test()
script {
sh '''
echo "worker01 build done"
echo "arm64 build done"
date
'''
}
}
}
}
stage ('build worker02') {
agent {label " worker02 "}
stage ('build Mac') {
agent {label " Mac_catalina "}
steps {
timeout(time: 20, unit: 'MINUTES') {
pre_test()
pre_test_mac()
script {
sh '''
echo "worker02 build done"
echo "Mac build done"
date
'''
}
}
}
}
}
}
stage('run test') {
options { skipDefaultCheckout() }
when {
allOf {
changeRequest()
not { expression { env.CHANGE_BRANCH =~ /docs\// }}
}
}
parallel {
stage ('build worker07_arm64') {
agent {label " worker07_arm64 "}
steps {
timeout(time: 20, unit: 'MINUTES') {
pre_test()
script {
sh '''
echo "worker07_arm64 build done"
date
'''
}
}
stage('build win') {
agent {label " windows10_05 || windows10_06 "}
environment{
WIN_INTERNAL_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\TDinternal"
WIN_COMMUNITY_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\TDinternal\\community"
WIN_SYSTEM_TEST_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\TDinternal\\community\\tests\\system-test"
WIN_CONNECTOR_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\taos-connector-python"
}
}
stage ('build Mac_catalina ') {
agent {label " Mac_catalina "}
steps {
timeout(time: 20, unit: 'MINUTES') {
pre_test_mac()
script {
sh '''
echo "Mac_catalina build done"
date
'''
}
pre_test_win()
pre_test_build_win()
}
}
}
stage('run cases') {
agent {label " worker01 || worker02 "}
steps {
sh '''
date
pwd
hostname
'''
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
timeout(time: 20, unit: 'MINUTES') {
timeout(time: 15, unit: 'MINUTES') {
pre_test()
script {
sh '''
echo "Linux build done"
date
cd ${WKC}/tests/parallel_test
time ./run.sh -m m.json -t cases.task -l ${LOGDIR} -b ${BRANCH_NAME}
date
hostname
'''
}
}
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
timeout(time: 60, unit: 'MINUTES') {
script {
def extra_param = ""
def log_server_file = "/home/log_server.json"
def timeout_cmd = ""
if (fileExists(log_server_file)) {
def log_server_enabled = sh (
script: 'jq .enabled ' + log_server_file,
returnStdout: true
).trim()
def timeout_param = sh (
script: 'jq .timeout ' + log_server_file,
returnStdout: true
).trim()
if (timeout_param != "null" && timeout_param != "0") {
timeout_cmd = "timeout " + timeout_param
}
if (log_server_enabled == "1") {
def log_server = sh (
script: 'jq .server ' + log_server_file + ' | sed "s/\\\"//g"',
returnStdout: true
).trim()
if (log_server != "null" && log_server != "") {
extra_param = "-w " + log_server
}
}
}
sh '''
date
cd ${WKC}/tests/parallel_test
''' + timeout_cmd + ''' time ./run.sh -m /home/m.json -t cases.task -l ${LOGDIR} -b ${BRANCH_NAME} ''' + extra_param + '''
date
hostname
'''
}
}
}
}
}
}
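The `run test` stage above parses an optional `/home/log_server.json` with `jq` before launching `run.sh`. A minimal sketch of the shape that logic expects; only the field names `.enabled`, `.timeout`, and `.server` come from the pipeline code, the values are illustrative:

```bash
cat > /home/log_server.json <<'EOF'
{"enabled": 1, "timeout": 3600, "server": "192.168.1.100"}
EOF
jq .enabled /home/log_server.json   # 1    -> run.sh gets "-w 192.168.1.100"
jq .timeout /home/log_server.json   # 3600 -> run.sh is wrapped in "timeout 3600"
```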
......
......@@ -54,14 +54,14 @@ Database changed.
taos> show vgroups;
vgId | tables | status | onlines | v1_dnode | v1_status | compacting |
==========================================================================================
14 | 38000 | ready | 1 | 1 | master | 0 |
15 | 38000 | ready | 1 | 1 | master | 0 |
16 | 38000 | ready | 1 | 1 | master | 0 |
17 | 38000 | ready | 1 | 1 | master | 0 |
18 | 37001 | ready | 1 | 1 | master | 0 |
19 | 37000 | ready | 1 | 1 | master | 0 |
20 | 37000 | ready | 1 | 1 | master | 0 |
21 | 37000 | ready | 1 | 1 | master | 0 |
14 | 38000 | ready | 1 | 1 | leader | 0 |
15 | 38000 | ready | 1 | 1 | leader | 0 |
16 | 38000 | ready | 1 | 1 | leader | 0 |
17 | 38000 | ready | 1 | 1 | leader | 0 |
18 | 37001 | ready | 1 | 1 | leader | 0 |
19 | 37000 | ready | 1 | 1 | leader | 0 |
20 | 37000 | ready | 1 | 1 | leader | 0 |
21 | 37000 | ready | 1 | 1 | leader | 0 |
Query OK, 8 row(s) in set (0.001154s)
```
......@@ -161,14 +161,14 @@ First `show vgroups` is executed to show the vgroup distribution.
taos> show vgroups;
vgId | tables | status | onlines | v1_dnode | v1_status | compacting |
==========================================================================================
14 | 38000 | ready | 1 | 3 | master | 0 |
15 | 38000 | ready | 1 | 3 | master | 0 |
16 | 38000 | ready | 1 | 3 | master | 0 |
17 | 38000 | ready | 1 | 3 | master | 0 |
18 | 37001 | ready | 1 | 3 | master | 0 |
19 | 37000 | ready | 1 | 1 | master | 0 |
20 | 37000 | ready | 1 | 1 | master | 0 |
21 | 37000 | ready | 1 | 1 | master | 0 |
14 | 38000 | ready | 1 | 3 | leader | 0 |
15 | 38000 | ready | 1 | 3 | leader | 0 |
16 | 38000 | ready | 1 | 3 | leader | 0 |
17 | 38000 | ready | 1 | 3 | leader | 0 |
18 | 37001 | ready | 1 | 3 | leader | 0 |
19 | 37000 | ready | 1 | 1 | leader | 0 |
20 | 37000 | ready | 1 | 1 | leader | 0 |
21 | 37000 | ready | 1 | 1 | leader | 0 |
Query OK, 8 row(s) in set (0.001314s)
```
......@@ -191,14 +191,14 @@ Query OK, 0 row(s) in set (0.000575s)
taos> show vgroups;
vgId | tables | status | onlines | v1_dnode | v1_status | v2_dnode | v2_status | compacting |
=================================================================================================================
14 | 38000 | ready | 1 | 3 | master | 0 | NULL | 0 |
15 | 38000 | ready | 1 | 3 | master | 0 | NULL | 0 |
16 | 38000 | ready | 1 | 3 | master | 0 | NULL | 0 |
17 | 38000 | ready | 1 | 3 | master | 0 | NULL | 0 |
18 | 37001 | ready | 2 | 1 | slave | 3 | master | 0 |
19 | 37000 | ready | 1 | 1 | master | 0 | NULL | 0 |
20 | 37000 | ready | 1 | 1 | master | 0 | NULL | 0 |
21 | 37000 | ready | 1 | 1 | master | 0 | NULL | 0 |
14 | 38000 | ready | 1 | 3 | leader | 0 | NULL | 0 |
15 | 38000 | ready | 1 | 3 | leader | 0 | NULL | 0 |
16 | 38000 | ready | 1 | 3 | leader | 0 | NULL | 0 |
17 | 38000 | ready | 1 | 3 | leader | 0 | NULL | 0 |
18 | 37001 | ready | 2 | 1 | follower | 3 | leader | 0 |
19 | 37000 | ready | 1 | 1 | leader | 0 | NULL | 0 |
20 | 37000 | ready | 1 | 1 | leader | 0 | NULL | 0 |
21 | 37000 | ready | 1 | 1 | leader | 0 | NULL | 0 |
Query OK, 8 row(s) in set (0.001242s)
```
......@@ -207,7 +207,7 @@ It can be seen from above output that vgId 18 has been moved from dnode 3 to dno
:::note
- Manual load balancing can only be performed when the automatic load balancing is disabled, i.e. `balance` is set to 0.
- Only a vnode in normal state, i.e. master or slave, can be moved. vnode can't be moved when its in status offline, unsynced or syncing.
- Only a vnode in normal state, i.e. leader or follower, can be moved. A vnode can't be moved when it is in offline, unsynced, or syncing status.
- Before moving a vnode, it's necessary to make sure the target dnode has enough resources: CPU, memory and disk.
:::
......@@ -27,7 +27,7 @@ There may be multiple dnodes in a cluster, but only one mnode can be started in
SHOW MNODES;
```
The end point and role/status (master, slave, unsynced, or offline) of all mnodes can be shown by the above command. When the first dnode is started in a cluster, there must be one mnode in this dnode. Without at least one mnode, the cluster cannot work. If `numOfMNodes` is configured to 2, another mnode will be started when the second dnode is launched.
The end point and role/status (leader, follower, unsynced, or offline) of all mnodes can be shown by the above command. When the first dnode is started in a cluster, there must be one mnode in this dnode. Without at least one mnode, the cluster cannot work. If `numOfMNodes` is configured to 2, another mnode will be started when the second dnode is launched.
For the high availability of mnode, `numOfMnodes` needs to be configured to 2 or a higher value. Because the data consistency between mnodes must be guaranteed, the replica confirmation parameter `quorum` is set to 2 automatically if `numOfMNodes` is set to 2 or higher.
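A minimal sketch of enabling this, assuming the default config location and a systemd-managed service:

```bash
# In taos.cfg on the dnodes (path assumed): request two mnodes; quorum is then
# raised to 2 automatically, as described above.
echo "numOfMnodes 2" >> /etc/taos/taos.cfg
systemctl restart taosd
```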
......@@ -58,13 +58,13 @@ When a dnode is offline, it can be detected by the TDengine cluster. There are t
- If the dnode has been offline over the threshold configured in `offlineThreshold` in `taos.cfg`, the dnode will be removed from the cluster automatically. A system alert will be generated and automatic load balancing will be triggered if `balance` is set to 1. When the removed dnode is restarted and becomes online, it will not join the cluster automatically. The system administrator has to manually join the dnode to the cluster.
:::note
If all the vnodes in a vgroup (or mnodes in mnode group) are in offline or unsynced status, the master node can only be voted on, after all the vnodes or mnodes in the group become online and can exchange status. Following this, the vgroup (or mnode group) is able to provide service.
If all the vnodes in a vgroup (or mnodes in the mnode group) are in offline or unsynced status, a leader node can only be elected after all the vnodes or mnodes in the group become online and can exchange status. Following this, the vgroup (or mnode group) is able to provide service.
:::
## Arbitrator
The "arbitrator" component is used to address the special case when the number of replicas is set to an even number like 2,4 etc. If half of the vnodes in a vgroup don't work, it is impossible to vote and select a master node. This situation also applies to mnodes if the number of mnodes is set to an even number like 2,4 etc.
The "arbitrator" component is used to address the special case when the number of replicas is set to an even number like 2,4 etc. If half of the vnodes in a vgroup don't work, it is impossible to vote and select a leader node. This situation also applies to mnodes if the number of mnodes is set to an even number like 2,4 etc.
To resolve this problem, a new arbitrator component named `tarbitrator`, an abbreviation of TDengine Arbitrator, was introduced. The `tarbitrator` simulates a vnode or mnode but it's only responsible for network communication and doesn't handle any actual data access. As long as more than half of the vnode or mnode, including Arbitrator, are available the vnode group or mnode group can provide data insertion or query services normally.
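A sketch of wiring the arbitrator into a two-replica deployment; the hostname is illustrative and 6042 is assumed as tarbitrator's default port:

```bash
# On a third machine: start the arbitrator.
nohup tarbitrator >/dev/null 2>&1 &
# On every dnode: point taos.cfg at it (hostname illustrative), then restart taosd.
echo "arbitrator arb.example.com:6042" >> /etc/taos/taos.cfg
```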
......
......@@ -21,7 +21,7 @@ The following example is in an Ubuntu environment and uses the `curl` tool to ve
The following example lists all databases on the host h1.taosdata.com. To use it in your environment, replace `h1.taosdata.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number.
```html
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' h1.taosdata.com:6041/rest/sql
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" h1.taosdata.com:6041/rest/sql
```
The following return value results indicate that the verification passed.
......@@ -106,13 +106,13 @@ The HTTP request's BODY is a complete SQL command, and the data table in the SQL
Use `curl` to initiate an HTTP request with a custom authentication method, with the following syntax.
```bash
curl -H 'Authorization: Basic <TOKEN>' -d '<SQL>' <ip>:<PORT>/rest/sql/[db_name]
curl -L -H "Authorization: Basic <TOKEN>" -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name]
```
Or
```bash
curl -u username:password -d '<SQL>' <ip>:<PORT>/rest/sql/[db_name]
curl -L -u username:password -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name]
```
where `TOKEN` is the string after Base64 encoding of `{username}:{password}`, e.g. `root:taosdata` is encoded as `cm9vdDp0YW9zZGF0YQ==`.
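The token can be produced with any Base64 tool, for example:

```bash
echo -n 'root:taosdata' | base64
# cm9vdDp0YW9zZGF0YQ==
```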
......@@ -192,7 +192,7 @@ Response body:
- query all records from table d1001 of database demo
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001' 192.168.0.1:6041/rest/sql
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sql
```
Response body:
......@@ -218,7 +218,7 @@ Response body:
- Create database demo:
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 192.168.0.1:6041/rest/sql
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "create database demo" 192.168.0.1:6041/rest/sql
```
Response body:
......@@ -240,7 +240,7 @@ Response body:
When the HTTP request URL uses `/rest/sqlt`, the returned result set's timestamp value will be in Unix timestamp format, for example:
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001' 192.168.0.1:6041/rest/sqlt
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sqlt
```
Response body:
......@@ -268,7 +268,7 @@ Response body:
When the HTTP request URL uses `/rest/sqlutc`, the timestamp of the returned result set will be expressed as a UTC format, for example:
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.t1' 192.168.0.1:6041/rest/sqlutc
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.t1" 192.168.0.1:6041/rest/sqlutc
```
Response body:
......
......@@ -211,7 +211,7 @@
],
"timeFrom": null,
"timeShift": null,
"title": "Master MNode",
"title": "Leader MNode",
"transformations": [
{
"id": "filterByValue",
......@@ -221,7 +221,7 @@
"config": {
"id": "regex",
"options": {
"value": "master"
"value": "leader"
}
},
"fieldName": "role"
......@@ -300,7 +300,7 @@
],
"timeFrom": null,
"timeShift": null,
"title": "Master MNode Create Time",
"title": "Leader MNode Create Time",
"transformations": [
{
"id": "filterByValue",
......@@ -310,7 +310,7 @@
"config": {
"id": "regex",
"options": {
"value": "master"
"value": "leader"
}
},
"fieldName": "role"
......
......@@ -153,7 +153,7 @@
],
"timeFrom": null,
"timeShift": null,
"title": "Master MNode",
"title": "Leader MNode",
"transformations": [
{
"id": "filterByValue",
......@@ -163,7 +163,7 @@
"config": {
"id": "regex",
"options": {
"value": "master"
"value": "leader"
}
},
"fieldName": "role"
......@@ -246,7 +246,7 @@
],
"timeFrom": null,
"timeShift": null,
"title": "Master MNode Create Time",
"title": "Leader MNode Create Time",
"transformations": [
{
"id": "filterByValue",
......@@ -256,7 +256,7 @@
"config": {
"id": "regex",
"options": {
"value": "master"
"value": "leader"
}
},
"fieldName": "role"
......
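The dashboard edits above are purely mechanical renames of the `master` role value; on an exported dashboard JSON the same change could be scripted, for example:

```bash
# One-off rename on an exported dashboard (file name is illustrative).
sed -i 's/"value": "master"/"value": "leader"/g; s/Master MNode/Leader MNode/g' dashboard.json
```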
......@@ -274,8 +274,8 @@ Details of the metrics are as follows.
This section contains the current information and status of the cluster; the alert information is also shown here (from left to right, top to bottom).
- **First EP**: the `firstEp` setting in the current TDengine cluster.
- **Version**: TDengine server version (master mnode).
- **Master Uptime**: The time elapsed since the current Master MNode was elected as Master.
- **Version**: TDengine server version (leader mnode).
- **Leader Uptime**: The time elapsed since the current Leader MNode was elected as Leader.
- **Expire Time** - Enterprise version expiration time.
- **Used Measuring Points** - The number of measuring points used by the Enterprise Edition.
- **Databases** - The number of databases.
......@@ -333,7 +333,7 @@ Data node resource usage display with repeated multiple rows for the variable `$
2. **Has MNodes?**: whether the current dnode is a mnode.
3. **CPU Cores**: the number of CPU cores.
4. **VNodes Number**: the number of VNodes in the current dnode.
5. **VNodes Masters**: the number of vnodes in the master role.
5. **VNodes Masters**: the number of vnodes in the leader role.
6. **Current CPU Usage of taosd**: CPU usage rate of taosd processes.
7. **Current Memory Usage of taosd**: memory usage of taosd processes.
8. **Disk Used**: The total disk usage percentage of the taosd data directory.
......
......@@ -26,7 +26,6 @@ All executable files of TDengine are in the _/usr/local/taos/bin_ directory by d
- _remove.sh_: script to uninstall TDengine; execute it with caution. It is linked to the **rmtaos** command in the /usr/bin directory, removes the TDengine installation directory `/usr/local/taos`, but keeps `/etc/taos`, `/var/lib/taos`, `/var/log/taos`
- _taosadapter_: server-side executable that provides RESTful services and accepts write requests from a variety of other software
- _tarbitrator_: provides arbitration for two-node cluster deployments
- _run_taosd_and_taosadapter.sh_: script to start both taosd and taosAdapter
- _TDinsight.sh_: script to download TDinsight and install it
- _set_core.sh_: script for setting up the system to generate core dump files for easy debugging
- _taosd-dump-cfg.gdb_: gdb script to facilitate debugging of taosd.
......
......@@ -22,9 +22,9 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc
**Virtual node (vnode)**: To better support data sharding, load balancing and prevent data from overheating or skewing, data nodes are virtualized into multiple virtual nodes (vnode, V2, V3, V4, etc. in the figure). Each vnode is a relatively independent work unit, which is the basic unit of time-series data storage and has independent running threads, memory space and persistent storage path. A vnode contains a certain number of tables (data collection points). When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a data node depends on the capacity of the hardware of the physical node where the data node is located. A vnode belongs to only one DB, but a DB can have multiple vnodes. In addition to the stored time-series data, a vnode also stores the schema and tag values of the included tables. A virtual node is uniquely identified in the system by the EP of the data node and the VGroup ID to which it belongs and is created and managed by the management node.
**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in the figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 5) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The master/slave mechanism is adopted for the mnode group and the data synchronization is carried out in a strongly consistent way. Any data update operation can only be executed on the master. The creation of mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located, through internal messaging interaction.
**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in the figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 3) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The leader/follower mechanism is adopted for the mnode group and the data synchronization is carried out in a strongly consistent way. Any data update operation can only be executed on the leader. The creation of mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located, through internal messaging interaction.
**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed in a master/slave mechanism. Write operations can only be performed on the master vnode, and then replicated to slave vnodes, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `“replica”` when creating a DB, and the default is 1. Using the multi-replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. Virtual node groups are created and managed by the management node, and the management node assigns a system unique ID, aka VGroup ID. If two virtual nodes have the same vnode group ID, it means that they belong to the same group and the data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed, allowing only one, that is, no data replication. VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused.
**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed in a leader/follower mechanism. Write operations can only be performed on the leader vnode, and then replicated to follower vnodes, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `“replica”` when creating a DB, and the default is 1. Using the multi-replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. Virtual node groups are created and managed by the management node, and the management node assigns a system unique ID, aka VGroup ID. If two virtual nodes have the same vnode group ID, it means that they belong to the same group and the data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed, allowing only one, that is, no data replication. VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused.
**TAOSC**: TAOSC is the driver provided by TDengine to applications. It is responsible for dealing with the interaction between application and cluster, and provides the native interface for the C/C++ language. It is also embedded in the JDBC, C #, Python, Go, Node.js language connection libraries. Applications interact with the whole cluster through TAOSC instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; when returning the results to the application, TAOSC also needs to be responsible for the final level of aggregation, sorting, filtering and other operations. For JDBC, C/C++/C#/Python/Go/Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, TAOSC has a running instance on each dnode of TDengine cluster.
......@@ -62,13 +62,13 @@ To explain the relationship between vnode, mnode, TAOSC and application and thei
1. Application initiates a request to insert data through JDBC, ODBC, or other APIs.
2. TAOSC checks the cache to see if meta data exists for the table. If it does, it goes straight to Step 4. If not, TAOSC sends a get meta-data request to mnode.
3. Mnode returns the meta-data of the table to TAOSC. Meta-data contains the schema of the table, and also the vgroup information to which the table belongs (the vnode ID and the End Point of the dnode where the table belongs. If the number of replicas is N, there will be N groups of End Points). If TAOSC does not receive a response from the mnode for a long time, and there are multiple mnodes, TAOSC will send a request to the next mnode.
4. TAOSC initiates an insert request to master vnode.
4. TAOSC initiates an insert request to leader vnode.
5. After vnode inserts the data, it gives a reply to TAOSC, indicating that the insertion is successful. If TAOSC doesn't get a response from vnode for a long time, TAOSC will treat this node as offline. In this case, if there are multiple replicas of the inserted database, TAOSC will issue an insert request to the next vnode in vgroup.
6. TAOSC notifies APP that writing is successful.
For Step 2 and 3, when TAOSC starts, it does not know the End Point of mnode, so it will directly initiate a request to the configured serving End Point of the cluster. If the dnode that receives the request does not have a mnode configured, it will reply with the mnode EP list, so that TAOSC will re-issue a request to obtain meta-data to the EP of another mnode.
For Step 4 and 5, without caching, TAOSC can't recognize the master in the virtual node group, so assumes that the first vnode is the master and sends a request to it. If this vnode is not the master, it will reply to the actual master as a new target to which TAOSC shall send a request. Once a response of successful insertion is obtained, TAOSC will cache the information of master node.
For Step 4 and 5, without caching, TAOSC can't recognize the leader in the virtual node group, so it assumes that the first vnode is the leader and sends a request to it. If this vnode is not the leader, it replies with the actual leader as the new target to which TAOSC shall send the request. Once a response of successful insertion is obtained, TAOSC will cache the information of the leader node.
The above describes the process of inserting data. The processes of querying and computing are the same. TAOSC encapsulates and hides all these complicated processes, and it is transparent to applications.
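Because TAOSC also backs the fully distributed RESTful interface (as noted above), the whole write path can be exercised from a shell; the table and values here are hypothetical:

```bash
# Drives steps 1-6 above through the REST endpoint instead of a native connector.
curl -L -u root:taosdata -d "INSERT INTO demo.d1001 VALUES (NOW, 10.3)" 127.0.0.1:6041/rest/sql
```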
......@@ -119,65 +119,65 @@ The load balancing process does not require any manual intervention, and it is t
## Data Writing and Replication Process
If a database has N replicas, a virtual node group has N virtual nodes. But only one is the Master and all others are slaves. When the application writes a new record to system, only the Master vnode can accept the writing request. If a slave vnode receives a writing request, the system will notifies TAOSC to redirect.
If a database has N replicas, a virtual node group has N virtual nodes, but only one is the leader and all others are followers. When the application writes a new record to the system, only the leader vnode can accept the write request. If a follower vnode receives a write request, the system will notify TAOSC to redirect.
### Master vnode Writing Process
### Leader vnode Writing Process
Master Vnode uses a writing process as follows:
A leader vnode uses the following writing process:
![TDengine Database Master Writing Process](write_master.webp)
<center> Figure 3: TDengine Master writing process </center>
![TDengine Database Leader Writing Process](write_master.webp)
<center> Figure 3: TDengine Leader writing process </center>
1. Master vnode receives the application data insertion request, verifies, and moves to next step;
1. Leader vnode receives the application data insertion request, verifies, and moves to next step;
2. If the system configuration parameter `“walLevel”` is greater than 0, the vnode writes the original request packet into the database log file (WAL). If walLevel is set to 2 and fsync is set to 0, TDengine makes the WAL data persistent immediately, so that even if the system goes down, all data can be recovered from the database log file (see the config sketch after this list);
3. If there are multiple replicas, vnode will forward data packet to slave vnodes in the same virtual node group, and the forwarded packet has a version number with data;
3. If there are multiple replicas, vnode will forward data packet to follower vnodes in the same virtual node group, and the forwarded packet has a version number with data;
4. Write into memory and add the record to “skip list”;
5. Master vnode returns a confirmation message to the application, indicating a successful write.
5. Leader vnode returns a confirmation message to the application, indicating a successful write.
6. If any of Step 2, 3 or 4 fails, the error will directly return to the application.
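A sketch of the WAL settings referenced in step 2, assuming the default config file location:

```bash
# Write every request to the WAL and make it persistent immediately.
echo "walLevel 2" >> /etc/taos/taos.cfg
echo "fsync 0"    >> /etc/taos/taos.cfg
```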
### Slave vnode Writing Process
### Follower vnode Writing Process
For a slave vnode, the write process as follows:
For a follower vnode, the write process is as follows:
![TDengine Database Slave Writing Process](write_slave.webp)
<center> Figure 4: TDengine Slave Writing Process </center>
![TDengine Database Follower Writing Process](write_slave.webp)
<center> Figure 4: TDengine Follower Writing Process </center>
1. Slave vnode receives a data insertion request forwarded by Master vnode;
1. Follower vnode receives a data insertion request forwarded by Leader vnode;
2. If the system configuration parameter `“walLevel”` is greater than 0, the vnode writes the original request packet into the database log file (WAL). If walLevel is set to 2 and fsync is set to 0, TDengine makes the WAL data persistent immediately, so that even if the system goes down, all data can be recovered from the database log file;
3. Write into memory and add the record to “skip list”.
Compared with Master vnode, slave vnode has no forwarding or reply confirmation step, means two steps less. But writing into memory and WAL is exactly the same.
Compared with a leader vnode, a follower vnode has no forwarding or reply confirmation step, which means two fewer steps. But writing into memory and the WAL is exactly the same.
### Remote Disaster Recovery and IDC (Internet Data Center) Migration
As discussed above, TDengine writes using Master and Slave processes. TDengine adopts asynchronous replication for data synchronization. This method can greatly improve write performance, with no obvious impact from network delay. By configuring IDC and rack number for each physical node, it can be ensured that for a virtual node group, virtual nodes are composed of physical nodes from different IDC and different racks, thus implementing remote disaster recovery without other tools.
As discussed above, TDengine writes using Leader and Follower processes. TDengine adopts asynchronous replication for data synchronization. This method can greatly improve write performance, with no obvious impact from network delay. By configuring IDC and rack number for each physical node, it can be ensured that for a virtual node group, virtual nodes are composed of physical nodes from different IDC and different racks, thus implementing remote disaster recovery without other tools.
On the other hand, TDengine supports dynamic modification of the replica number. Once the number of replicas increases, the newly added virtual nodes will immediately enter the data synchronization process. After synchronization is complete, added virtual nodes can provide services. In the synchronization process, master and other synchronized virtual nodes keep serving. With this feature, TDengine can provide IDC migration without service interruption. It is only necessary to add new physical nodes to the existing IDC cluster, and then remove old physical nodes after the data synchronization is completed.
On the other hand, TDengine supports dynamic modification of the replica number. Once the number of replicas increases, the newly added virtual nodes will immediately enter the data synchronization process. After synchronization is complete, added virtual nodes can provide services. In the synchronization process, leader and other synchronized virtual nodes keep serving. With this feature, TDengine can provide IDC migration without service interruption. It is only necessary to add new physical nodes to the existing IDC cluster, and then remove old physical nodes after the data synchronization is completed.
However, the asynchronous replication has a very low probability scenario where data may be lost. The specific scenario is as follows:
1. Master vnode has finished its 5-step operations, confirmed the success of writing to APP, and then goes down;
2. Slave vnode receives the write request, then processing fails before writing to the log in Step 2;
3. Slave vnode will become the new master, thus losing one record.
1. Leader vnode has finished its 5-step operations, confirmed the success of writing to APP, and then goes down;
2. Follower vnode receives the write request, then processing fails before writing to the log in Step 2;
3. Follower vnode will become the new leader, thus losing one record.
In theory, for asynchronous replication, there is no guarantee to prevent data loss. However, this is an extremely low probability scenario as described above.
Note: Remote disaster recovery and no-downtime IDC migration are only supported by Enterprise Edition. **Hint: This function is not available yet**
### Master/slave Selection
### Leader/follower Selection
Vnode maintains a version number. When memory data is persisted, the version number will also be persisted. For each data update operation, whether it is time-series data or metadata, this version number will be increased by one.
When a vnode starts, the roles (master, slave) are uncertain, and the data is in an unsynchronized state. It’s necessary to establish TCP connections with other nodes in the virtual node group and exchange status, including version and its own roles. Through the exchange, the system implements a master-selection process. The rules are as follows:
When a vnode starts, the roles (leader, follower) are uncertain, and the data is in an unsynchronized state. It’s necessary to establish TCP connections with other nodes in the virtual node group and exchange status, including version and its own roles. Through the exchange, the system implements a leader-selection process. The rules are as follows:
1. If there’s only one replica, it’s always master
2. When all replicas are online, the one with latest version is master
3. Over half of online nodes are virtual nodes, and some virtual node is slave, it will automatically become master
4. For 2 and 3, if multiple virtual nodes meet the requirement, the first vnode in virtual node group list will be selected as master.
1. If there’s only one replica, it’s always the leader
2. When all replicas are online, the one with the latest version is the leader
3. If over half of the virtual nodes are online and one of them is a follower, that vnode will automatically become the leader
4. For rules 2 and 3, if multiple virtual nodes meet the requirement, the first vnode in the virtual node group list will be selected as the leader.
### Synchronous Replication
For scenarios with strong data consistency requirements, asynchronous data replication is not applicable, because there is a small probability of data loss. So, TDengine provides a synchronous replication mechanism for users. When creating a database, in addition to specifying the number of replicas, user also needs to specify a new parameter “quorum”. If quorum is greater than one, it means that every time the Master forwards a message to the replica, it needs to wait for “quorum-1” reply confirms before informing the application that data has been successfully written in slave. If “quorum-1” reply confirms are not received within a certain period of time, the master vnode will return an error to the application.
For scenarios with strong data consistency requirements, asynchronous data replication is not applicable, because there is a small probability of data loss. So, TDengine provides a synchronous replication mechanism. When creating a database, in addition to specifying the number of replicas, the user also needs to specify a new parameter “quorum”. If quorum is greater than one, every time the leader forwards a message to the replicas, it needs to wait for quorum-1 reply confirmations before informing the application that the data has been successfully written on the followers. If quorum-1 reply confirmations are not received within a certain period of time, the leader vnode will return an error to the application.
With synchronous replication, the performance of the system will decrease and latency will increase. Because metadata needs strong consistency, the default for data synchronization between mnodes is synchronous replication.
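For example, a fully synchronous three-replica database could be created like this (a sketch; the database name is illustrative):

```bash
# Each write is confirmed by the leader plus quorum-1 = 1 follower before the
# application is acknowledged.
taos -s "CREATE DATABASE demo REPLICA 3 QUORUM 2;"
```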
......
......@@ -109,7 +109,7 @@ taos>
The REST interface provided by TDengine inside the container can also be accessed from the host.
```
curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql
curl -L -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql
```
Output is like below:
......@@ -147,7 +147,7 @@ docker run -d --name tdengine-taosd -p 6030-6042:6030-6042 -p 6030-6042:6030-604
- Verify the REST interface:
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' 127.0.0.1:6041/rest/sql
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" 127.0.0.1:6041/rest/sql
```
Below is an example output:
......
......@@ -2,6 +2,8 @@
title: 2.6
---
[2.6.0.6](https://github.com/taosdata/TDengine/releases/tag/ver-2.6.0.6)
[2.6.0.4](https://github.com/taosdata/TDengine/releases/tag/ver-2.6.0.4)
[2.6.0.1](https://github.com/taosdata/TDengine/releases/tag/ver-2.6.0.1)
......
......@@ -2,6 +2,8 @@
title: 2.4
---
[2.4.0.30](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.30)
[2.4.0.26](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.26)
[2.4.0.25](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.25)
......
......@@ -26,7 +26,7 @@ public class LineProtocolExample {
private static void createDatabase(Connection conn) throws SQLException {
try (Statement stmt = conn.createStatement()) {
// the default precision is ms (microsecond), but we use us(microsecond) here.
// the default precision is ms (millisecond), but we use us(microsecond) here.
stmt.execute("CREATE DATABASE IF NOT EXISTS test PRECISION 'us'");
stmt.execute("USE test");
}
......
......@@ -21,7 +21,7 @@ The RESTful interface does not depend on any TDengine library, so the client does not need to install
The following example lists all databases. Replace h1.taosdata.com and 6041 (the default value) with the FQDN and port number of the actual running TDengine service:
```html
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' h1.taosdata.com:6041/rest/sql
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" h1.taosdata.com:6041/rest/sql
```
A response like the following indicates that verification passed:
......@@ -106,13 +106,13 @@ The BODY of the HTTP request is a complete SQL statement, and the data table in the SQL statement
Use `curl` to initiate an HTTP request with a custom authentication method, with the following syntax:
```bash
curl -H 'Authorization: Basic <TOKEN>' -d '<SQL>' <ip>:<PORT>/rest/sql/[db_name]
curl -L -H "Authorization: Basic <TOKEN>" -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name]
```
Or
```bash
curl -u username:password -d '<SQL>' <ip>:<PORT>/rest/sql/[db_name]
curl -L -u username:password -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name]
```
where `TOKEN` is the Base64-encoded string of `{username}:{password}`, e.g. `root:taosdata` encodes to `cm9vdDp0YW9zZGF0YQ==`
......@@ -192,7 +192,7 @@ curl http://192.168.0.1:6041/rest/login/root/taosdata
- Query all records from table d1001 in database demo:
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001' 192.168.0.1:6041/rest/sql
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sql
```
Response:
......@@ -218,7 +218,7 @@ curl http://192.168.0.1:6041/rest/login/root/taosdata
- Create database demo:
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 192.168.0.1:6041/rest/sql
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "create database demo" 192.168.0.1:6041/rest/sql
```
Response:
......@@ -240,7 +240,7 @@ curl http://192.168.0.1:6041/rest/login/root/taosdata
When the HTTP request URL uses `/rest/sqlt`, the timestamps in the returned result set will be expressed in Unix timestamp format, for example
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001' 192.168.0.1:6041/rest/sqlt
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sqlt
```
Response:
......@@ -268,7 +268,7 @@ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001
When the HTTP request URL uses `/rest/sqlutc`, the timestamps in the returned result set will be expressed as UTC time strings, for example
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.t1' 192.168.0.1:6041/rest/sqlutc
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.t1" 192.168.0.1:6041/rest/sqlutc
```
Response:
......
......@@ -26,7 +26,6 @@ All executable files of TDengine are stored in the _/usr/local/taos/bin_ directory by default
- _remove.sh_: script to uninstall TDengine; execute it with caution. It is linked to the **rmtaos** command in the /usr/bin directory, removes the TDengine installation directory /usr/local/taos, but keeps /etc/taos, /var/lib/taos, /var/log/taos
- _taosadapter_: server-side executable that provides RESTful services and accepts write requests from a variety of other software
- _tarbitrator_: provides arbitration for two-node cluster deployments
- _run_taosd_and_taosadapter.sh_: script to start taosd and taosAdapter together
- _TDinsight.sh_: script to download and install TDinsight
- _set_core.sh_: script to configure the system to generate core dump files for easier debugging
- _taosd-dump-cfg.gdb_: gdb script to facilitate debugging of taosd.
......
......@@ -23,7 +23,7 @@ The logical structure of TDengine's distributed architecture is shown below:
**Virtual node (vnode):** To better support data sharding and load balancing and to prevent data from overheating or skewing, data nodes are virtualized into multiple virtual nodes (vnode; V2, V3, V4, etc. in the figure). Each vnode is a relatively independent work unit and the basic unit of time-series data storage, with independent running threads, memory space, and persistent storage path. A vnode contains a certain number of tables (data collection points). When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a data node depends on the hardware resources of the physical node where the data node is located. A vnode belongs to only one DB, but a DB can have multiple vnodes. Besides the stored time-series data, a vnode also holds the schema and tag values of the tables it contains. A virtual node is uniquely identified in the system by the EP of its data node and its VGroup ID, and is created and managed by the management node.
**Management node (mnode):** A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and for load balancing among nodes (M in the figure). The management node is also responsible for storing and managing metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple mnodes (no more than 3 in the open-source edition) can be configured in a TDengine cluster, and they automatically form a virtual management node group (M0, M1, M2 in the figure). The mnode group is managed with a master/slave mechanism, and data synchronization is carried out in a strongly consistent way; any data update operation can only be executed on the master. The creation of the mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains, through internal message interaction, the EP of every dnode in the cluster where an mnode is located.
**Management node (mnode):** A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and for load balancing among nodes (M in the figure). The management node is also responsible for storing and managing metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple mnodes (no more than 3) can be configured in a TDengine cluster, and they automatically form a virtual management node group (M0, M1, M2 in the figure). The mnode group is managed with a master/slave mechanism, and data synchronization is carried out in a strongly consistent way; any data update operation can only be executed on the master. The creation of the mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains, through internal message interaction, the EP of every dnode in the cluster where an mnode is located.
**Virtual node group (VGroup):** Vnodes on different data nodes can form a virtual node group (vgroup) to ensure high system reliability. A virtual node group is managed with a master/slave mechanism. Write operations can only be performed on the master vnode, and the system synchronizes the data to the slave vnodes through asynchronous replication, ensuring that copies of the data exist on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If a DB has N replicas, the system must have at least N data nodes. The number of replicas can be specified with the parameter replica when creating the DB, and the default is 1. With TDengine's multi-replica feature, the same high data reliability can be achieved without expensive storage devices such as disk arrays. Virtual node groups are created and managed by the management node, which assigns them a system-unique ID, the VGroup ID. If two virtual nodes have the same VGroup ID, they belong to the same group and their data back each other up. The number of virtual nodes in a virtual node group can be changed dynamically, and one is allowed, meaning no data replication. The VGroup ID never changes; even if a virtual node group is deleted, its ID is not reclaimed and reused.
......
......@@ -108,7 +108,7 @@ taos>
You can also use curl on the host to access the TDengine server inside the Docker container through the RESTful port.
```
curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql
curl -L -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql
```
Example output:
......@@ -148,7 +148,7 @@ docker run -d --name tdengine-taosd -p 6030-6042:6030-6042 -p 6030-6042:6030-604
Use the curl command to verify that the RESTful interface works properly:
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' 127.0.0.1:6041/rest/sql
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" 127.0.0.1:6041/rest/sql
```
Example output:
......
......@@ -2,6 +2,8 @@
title: 2.6
---
[2.6.0.6](https://github.com/taosdata/TDengine/releases/tag/ver-2.6.0.6)
[2.6.0.4](https://github.com/taosdata/TDengine/releases/tag/ver-2.6.0.4)
[2.6.0.1](https://github.com/taosdata/TDengine/releases/tag/ver-2.6.0.1)
......
......@@ -2,6 +2,8 @@
title: 2.4
---
[2.4.0.30](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.30)
[2.4.0.26](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.26)
[2.4.0.25](https://github.com/taosdata/TDengine/releases/tag/ver-2.4.0.25)
......
......@@ -182,14 +182,10 @@ cd "${curr_dir}"
# 2. cmake executable file
compile_dir="${top_dir}/debug"
if [ -d ${compile_dir} ]; then
${csudo}rm -rf ${compile_dir}
rm -rf ${compile_dir}
fi
if [ "$osType" != "Darwin" ]; then
${csudo}mkdir -p ${compile_dir}
else
mkdir -p ${compile_dir}
fi
mkdir -p ${compile_dir}
cd ${compile_dir}
if [[ "$allocator" == "jemalloc" ]]; then
......@@ -256,9 +252,9 @@ if [ "$osType" != "Darwin" ]; then
echo "====do deb package for the ubuntu system===="
output_dir="${top_dir}/debs"
if [ -d ${output_dir} ]; then
${csudo}rm -rf ${output_dir}
rm -rf ${output_dir}
fi
${csudo}mkdir -p ${output_dir}
mkdir -p ${output_dir}
cd ${script_dir}/deb
${csudo}./makedeb.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
......@@ -281,9 +277,9 @@ if [ "$osType" != "Darwin" ]; then
echo "====do rpm package for the centos system===="
output_dir="${top_dir}/rpms"
if [ -d ${output_dir} ]; then
${csudo}rm -rf ${output_dir}
rm -rf ${output_dir}
fi
${csudo}mkdir -p ${output_dir}
mkdir -p ${output_dir}
cd ${script_dir}/rpm
${csudo}./makerpm.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
......
......@@ -199,7 +199,6 @@ function install_bin() {
${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
${csudo}rm -f ${bin_link_dir}/tarbitrator || :
${csudo}rm -f ${bin_link_dir}/set_core || :
${csudo}rm -f ${bin_link_dir}/run_${serverName}_and_${adapterName}.sh || :
${csudo}rm -f ${bin_link_dir}/TDinsight.sh || :
${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/*
......@@ -214,7 +213,6 @@ function install_bin() {
[ -x ${install_main_dir}/bin/TDinsight.sh ] && ${csudo}ln -s ${install_main_dir}/bin/TDinsight.sh ${bin_link_dir}/TDinsight.sh || :
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
[ -x ${install_main_dir}/bin/run_${serverName}_and_${adapterName}.sh ] && ${csudo}ln -s ${install_main_dir}/bin/run_${serverName}_and_${adapterName}.sh ${bin_link_dir}/run_${serverName}_and_${adapterName}.sh || :
[ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo}ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
if [ "$verMode" == "cluster" ]; then
......
......@@ -178,7 +178,6 @@ function install_bin() {
if [ "$osType" != "Darwin" ]; then
${csudo}rm -f ${bin_link_dir}/perfMonitor || :
${csudo}rm -f ${bin_link_dir}/set_core || :
${csudo}rm -f ${bin_link_dir}/run_${serverName}_and_${adapterName}.sh || :
${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
${csudo}cp -r ${binary_dir}/build/bin/${clientName} ${install_main_dir}/bin || :
......@@ -192,7 +191,6 @@ function install_bin() {
${csudo}cp -r ${script_dir}/taosd-dump-cfg.gdb ${install_main_dir}/bin
${csudo}cp -r ${script_dir}/remove.sh ${install_main_dir}/bin
${csudo}cp -r ${script_dir}/set_core.sh ${install_main_dir}/bin
${csudo}cp -r ${script_dir}/run_${serverName}_and_${adapterName}.sh ${install_main_dir}/bin
${csudo}cp -r ${script_dir}/startPre.sh ${install_main_dir}/bin
${csudo}chmod 0555 ${install_main_dir}/bin/*
......@@ -204,7 +202,6 @@ function install_bin() {
[ -x ${install_main_dir}/bin/${demoName} ] && ${csudo}ln -s ${install_main_dir}/bin/${demoName} ${bin_link_dir}/${demoName} || :
[ -x ${install_main_dir}/bin/perfMonitor ] && ${csudo}ln -s ${install_main_dir}/bin/perfMonitor ${bin_link_dir}/perfMonitor || :
[ -x ${install_main_dir}/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
[ -x ${install_main_dir}/run_${serverName}_and_${adapterName}.sh ] && ${csudo}ln -s ${install_main_dir}/bin/run_${serverName}_and_${adapterName}.sh ${bin_link_dir}/run_${serverName}_and_${adapterName}.sh || :
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || :
else
......
......@@ -91,7 +91,6 @@ else
${build_dir}/bin/tarbitrator\
${script_dir}/remove.sh \
${script_dir}/set_core.sh \
${script_dir}/run_taosd_and_taosadapter.sh \
${script_dir}/startPre.sh \
${script_dir}/taosd-dump-cfg.gdb"
fi
......@@ -158,7 +157,6 @@ if [ $adapterName != "taosadapter" ]; then
sed -i "s/taosadapter/${adapterName}/g" ${install_dir}/cfg/$adapterName.service
# !!! do not change taosadaptor here
mv ${install_dir}/bin/taosadapter ${install_dir}/bin/${adapterName}
mv ${install_dir}/bin/run_taosd_and_taosadapter.sh ${install_dir}/bin/run_${serverName}_and_${adapterName}.sh
mv ${install_dir}/bin/taosd-dump-cfg.gdb ${install_dir}/bin/${serverName}-dump-cfg.gdb
fi
......
......@@ -81,7 +81,6 @@ function clean_bin() {
${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
${csudo}rm -f ${bin_link_dir}/tarbitrator || :
${csudo}rm -f ${bin_link_dir}/set_core || :
${csudo}rm -f ${bin_link_dir}/run_${serverName}_and_${adapterName}.sh || :
${csudo}rm -f ${bin_link_dir}/TDinsight.sh || :
}
......
#!/bin/bash
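# Start taosadapter in the background when its binary is present, then run
# taosd in the foreground (the script lives and dies with taosd).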
[[ -x /usr/bin/taosadapter ]] && /usr/bin/taosadapter &
taosd
......@@ -453,6 +453,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const
return rowLen;
}
static int32_t tscSCreateSetValueToResObj(SSqlObj *pSql, int32_t rowLen, const char *tableName, const char *ddl) {
SSqlRes *pRes = &pSql->res;
......@@ -473,6 +474,7 @@ static int32_t tscSCreateSetValueToResObj(SSqlObj *pSql, int32_t rowLen, const c
STR_WITH_MAXSIZE_TO_VARSTR(dst, ddl, pField->bytes);
return 0;
}
static int32_t tscSCreateBuildResult(SSqlObj *pSql, BuildType type, const char *str, const char *result) {
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
int32_t rowLen = tscSCreateBuildResultFields(pSql, type, result);
......@@ -480,6 +482,7 @@ static int32_t tscSCreateBuildResult(SSqlObj *pSql, BuildType type, const char *
tscFieldInfoUpdateOffset(pQueryInfo);
return tscSCreateSetValueToResObj(pSql, rowLen, str, result);
}
int32_t tscRebuildCreateTableStatement(void *param,char *result) {
SCreateBuilder *builder = (SCreateBuilder *)param;
int32_t code = TSDB_CODE_SUCCESS;
......@@ -533,8 +536,8 @@ static int32_t tscGetDBInfo(SCreateBuilder *builder, char *result) {
memset(buf, 0, sizeof(buf));
int32_t* lengths = taos_fetch_lengths(pSql);
int32_t ret = tscGetNthFieldResult(row, fields, lengths, 0, buf);
if (0 == ret && STR_NOCASE_EQUAL(buf, strlen(buf), builder->buf, strlen(builder->buf))) {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "CREATE DATABASE %s", buf);
if (0 == ret && 0 == strcmp(buf, builder->buf)) {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "CREATE DATABASE `%s`", buf);
for (int i = 1; i < num_fields; i++) {
for (int j = 0; showColumns[j][0] != NULL; j++) {
if (STR_NOCASE_EQUAL(fields[i].name, strlen(fields[i].name), showColumns[j][0], strlen(showColumns[j][0]))) {
......
......@@ -1028,7 +1028,7 @@ static int32_t applyDataPointsWithSqlInsert(TAOS* taos, TAOS_SML_DATA_POINT* poi
if (info->numBatches >= MAX_SML_SQL_INSERT_BATCHES) {
tscError("SML:0x%"PRIx64" Apply points failed. exceeds max sql insert batches", info->id);
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
code = TSDB_CODE_TSC_TOO_MANY_SML_LINES;
goto cleanup;
}
......@@ -1047,7 +1047,7 @@ static int32_t applyDataPointsWithSqlInsert(TAOS* taos, TAOS_SML_DATA_POINT* poi
tscDebug("SML:0x%"PRIx64" sql: %s" , info->id, batch->sql);
if (info->numBatches >= MAX_SML_SQL_INSERT_BATCHES) {
tscError("SML:0x%"PRIx64" Apply points failed. exceeds max sql insert batches", info->id);
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
code = TSDB_CODE_TSC_TOO_MANY_SML_LINES;
goto cleanup;
}
bool batchesExecuted[MAX_SML_SQL_INSERT_BATCHES] = {false};
......
......@@ -606,8 +606,7 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
case TSDB_SQL_DROP_TABLE:
case TSDB_SQL_DROP_USER:
case TSDB_SQL_DROP_ACCT:
case TSDB_SQL_DROP_DNODE:
case TSDB_SQL_DROP_DB: {
case TSDB_SQL_DROP_DNODE: {
const char* msg2 = "invalid name";
const char* msg3 = "param name too long";
......@@ -626,14 +625,7 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
}
if (pInfo->type == TSDB_SQL_DROP_DB) {
assert(taosArrayGetSize(pInfo->pMiscInfo->a) == 1);
code = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), pzName);
if (code != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
} else if (pInfo->type == TSDB_SQL_DROP_TABLE) {
if (pInfo->type == TSDB_SQL_DROP_TABLE) {
assert(taosArrayGetSize(pInfo->pMiscInfo->a) == 1);
code = tscSetTableFullName(&pTableMetaInfo->name, &sTblToken, pSql, dbIncluded);
......@@ -656,11 +648,12 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
break;
}
case TSDB_SQL_DROP_DB:
case TSDB_SQL_USE_DB: {
const char* msg = "invalid db name";
SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0);
if (tscValidateName(pToken, false, NULL) != TSDB_CODE_SUCCESS) {
if (tscValidateName(pToken, true, NULL) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
......@@ -707,7 +700,7 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
char buf[TSDB_DB_NAME_LEN] = {0};
SStrToken token = taosTokenDup(&pCreateDB->dbname, buf, tListLen(buf));
if (tscValidateName(&token, false, NULL) != TSDB_CODE_SUCCESS) {
if (tscValidateName(&token, true, NULL) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
......@@ -816,7 +809,7 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0);
if (tscValidateName(pToken, false, NULL) != TSDB_CODE_SUCCESS) {
if (tscValidateName(pToken, true, NULL) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
......@@ -1639,7 +1632,7 @@ static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd) {
}
// field name must be unique
if (has(pFieldList, i + 1, pField->name) == true) {
if (has(pFieldList, i, pField->name) == true) {
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
return false;
}
......@@ -1698,7 +1691,7 @@ static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pC
return false;
}
if (has(pTagsList, i + 1, p->name) == true) {
if (has(pTagsList, i, p->name) == true) {
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
return false;
}
......@@ -1727,8 +1720,8 @@ static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pC
// field name must be unique
for (int32_t i = 0; i < numOfTags; ++i) {
TAOS_FIELD* p = taosArrayGet(pTagsList, i);
if (has(pFieldList, 0, p->name) == true) {
size_t numOfCols = taosArrayGetSize(pFieldList);
if (has(pFieldList, numOfCols, p->name) == true) {
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
return false;
}
......@@ -1864,7 +1857,7 @@ int32_t validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) {
// field name must be unique
for (int32_t i = 0; i < numOfTags + numOfCols; ++i) {
if (strncasecmp(pColField->name, pSchema[i].name, sizeof(pColField->name) - 1) == 0) {
if (strncmp(pColField->name, pSchema[i].name, sizeof(pColField->name) - 1) == 0) {
//return tscErrorMsgWithCode(TSDB_CODE_TSC_DUP_COL_NAMES, tscGetErrorMsgPayload(pCmd), pColField->name, NULL);
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "duplicated column names");
}
......@@ -1874,9 +1867,8 @@ int32_t validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) {
}
/* is contained in pFieldList or not */
static bool has(SArray* pFieldList, int32_t startIdx, const char* name) {
size_t numOfCols = taosArrayGetSize(pFieldList);
for (int32_t j = startIdx; j < numOfCols; ++j) {
static bool has(SArray* pFieldList, int32_t endIdx, const char* name) {
for (int32_t j = 0; j < endIdx; ++j) {
TAOS_FIELD* field = taosArrayGet(pFieldList, j);
if (strncmp(name, field->name, sizeof(field->name) - 1) == 0) return true;
}
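/* Illustration (a sketch, not the verbatim caller): with the new endIdx
 * semantics, each field is checked only against the fields declared before
 * it, so the loop index doubles as the exclusive end of the scan range. */
for (int32_t i = 0; i < (int32_t)taosArrayGetSize(pFieldList); ++i) {
  TAOS_FIELD* pField = taosArrayGet(pFieldList, i);
  if (has(pFieldList, i, pField->name)) {  /* duplicate among fields [0, i) */
    return false;                          /* duplicated column name */
  }
}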
......@@ -2277,10 +2269,6 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
hasDistinct = (pItem->distinct == true);
distIdx = hasDistinct ? i : -1;
}
if(pItem->aliasName != NULL && validateColumnName(pItem->aliasName) != TSDB_CODE_SUCCESS){
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg11);
}
if(pItem->aliasName != NULL && strcasecmp(pItem->aliasName, DEFAULT_PRIMARY_TIMESTAMP_COL_NAME) == 0){
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg11);
}
......@@ -4110,7 +4098,7 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
if (pDbPrefixToken->n <= 0) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
if (tscValidateName(pDbPrefixToken, false, NULL) != TSDB_CODE_SUCCESS) {
if (tscValidateName(pDbPrefixToken, true, NULL) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
......@@ -8023,7 +8011,9 @@ int32_t validateColumnName(char* name) {
return validateColumnName(token.z);
} else if (token.type == TK_ID) {
stringProcess(name, token.n);
return TSDB_CODE_SUCCESS;
if (strlen(name) == 0) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
} else {
if (isNumber(&token)) {
return TSDB_CODE_TSC_INVALID_OPERATION;
......
......@@ -3068,6 +3068,12 @@ int32_t tscValidateName(SStrToken* pToken, bool escapeEnabled, bool *dbIncluded)
}
}
if (escapeEnabled && pToken->type == TK_ID) {
if (pToken->z[0] == TS_BACKQUOTE_CHAR) {
pToken->n = stringProcess(pToken->z, pToken->n);
firstPartQuote = true;
}
}
int32_t firstPartLen = pToken->n;
pToken->z = sep + 1;
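/* Hypothetical stand-in for stringProcess as used above (assumption: it
 * strips a pair of enclosing backquotes in place and returns the new
 * length; the real helper may also handle escaped quotes). */
static int32_t stripBackquotes(char *z, int32_t n) {
  if (n >= 2 && z[0] == TS_BACKQUOTE_CHAR && z[n - 1] == TS_BACKQUOTE_CHAR) {
    memmove(z, z + 1, n - 2); /* drop the quotes, keep the body */
    return n - 2;
  }
  return n;
}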
......
......@@ -116,6 +116,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_TSC_INVALID_PRECISION_TYPE TAOS_DEF_ERROR_CODE(0, 0x0226) //"Invalid timestamp precision type")
#define TSDB_CODE_TSC_RES_TOO_MANY TAOS_DEF_ERROR_CODE(0, 0x0227) //"Result set too large to be output")
#define TSDB_CODE_TSC_INVALID_SCHEMA_VERSION TAOS_DEF_ERROR_CODE(0, 0x0228) //"invalid table schema version")
#define TSDB_CODE_TSC_TOO_MANY_SML_LINES TAOS_DEF_ERROR_CODE(0, 0x0229) //"too many lines in batch")
// mnode
#define TSDB_CODE_MND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0300) //"Message not processed"
......
......@@ -323,7 +323,7 @@ void shellRunCommandOnServer(TAOS *con, char command[]) {
int64_t oresult = atomic_load_64(&result);
if (regex_match(command, "^\\s*use\\s+[a-zA-Z0-9_]+\\s*;\\s*$", REG_EXTENDED | REG_ICASE)) {
if (regex_match(command, "^\\s*use\\s+([a-zA-Z0-9_]+|`.+`)\\s*;\\s*$", REG_EXTENDED | REG_ICASE)) {
fprintf(stdout, "Database changed.\n\n");
fflush(stdout);
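/* Standalone sketch of the widened "use <db>;" pattern, with the portable
 * [[:space:]] class standing in for \s (assumed equivalent here). */
#include <regex.h>
#include <stdio.h>

int main(void) {
  regex_t re;
  const char *pat =
      "^[[:space:]]*use[[:space:]]+([a-zA-Z0-9_]+|`.+`)[[:space:]]*;[[:space:]]*$";
  if (regcomp(&re, pat, REG_EXTENDED | REG_ICASE | REG_NOSUB) != 0) return 1;
  const char *samples[] = {"use db;", "USE `Db` ;", "use `电力系统`;", "use ;"};
  for (int i = 0; i < 4; i++)
    printf("%-20s -> %s\n", samples[i],
           regexec(&re, samples[i], 0, NULL, 0) == 0 ? "matches" : "no match");
  regfree(&re);
  return 0;
}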
......
Subproject commit 28a49b447f71c4f014ebbac858b7215b897d57fd
Subproject commit 7105027650b51e701cfa1dac11b8fb42d447dd01
......@@ -1235,7 +1235,7 @@ static int32_t mnodeProcessDropSuperTableMsg(SMnodeMsg *pMsg) {
static int32_t mnodeFindSuperTableTagIndex(SSTableObj *pStable, const char *tagName) {
SSchema *schema = (SSchema *) pStable->schema;
for (int32_t tag = 0; tag < pStable->numOfTags; tag++) {
if (strcasecmp(schema[pStable->numOfColumns + tag].name, tagName) == 0) {
if (strcmp(schema[pStable->numOfColumns + tag].name, tagName) == 0) {
return tag;
}
}
......@@ -1388,7 +1388,7 @@ static int32_t mnodeModifySuperTableTagName(SMnodeMsg *pMsg, char *oldTagName, c
static int32_t mnodeFindSuperTableColumnIndex(SSTableObj *pStable, char *colName) {
SSchema *schema = (SSchema *) pStable->schema;
for (int32_t col = 0; col < pStable->numOfColumns; col++) {
if (strcasecmp(schema[col].name, colName) == 0) {
if (strcmp(schema[col].name, colName) == 0) {
return col;
}
}
......
......@@ -703,8 +703,8 @@ static int32_t monBuildMnodesTotalSql(char *sql) {
for (int i = 0; i < num_fields; ++i) {
if (strcmp(fields[i].name, "role") == 0) {
int32_t charLen = monGetRowElemCharLen(fields[i], (char *)row[i]);
if (strncmp((char *)row[i], "master", charLen) == 0 ||
strncmp((char *)row[i], "slave", charLen) == 0) {
if (strncmp((char *)row[i], "leader", charLen) == 0 ||
strncmp((char *)row[i], "follower", charLen) == 0) {
totalMnodesAlive += 1;
}
}
......@@ -719,13 +719,13 @@ static int32_t monBuildMnodesTotalSql(char *sql) {
static int32_t monGetVgroupsTotalStats(char *dbName, int32_t *totalVgroups,
int32_t *totalVgroupsAlive) {
char subsql[TSDB_DB_NAME_LEN + 14];
char subsql[TSDB_DB_NAME_LEN + 16];
memset(subsql, 0, sizeof(subsql));
snprintf(subsql, TSDB_DB_NAME_LEN + 13, "show %s.vgroups", dbName);
snprintf(subsql, sizeof(subsql) - 1, "show `%s`.vgroups", dbName);
TAOS_RES *result = taos_query(tsMonitor.conn, subsql);
int32_t code = taos_errno(result);
if (code != TSDB_CODE_SUCCESS) {
monError("failed to execute cmd: show %s.vgroups, reason:%s", dbName, tstrerror(code));
monError("failed to execute cmd: show `%s`.vgroups, reason:%s", dbName, tstrerror(code));
}
TAOS_ROW row;
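/* Buffer sizing for the backquoted query (a sketch of the arithmetic behind
 * the +14 -> +16 change): "show `" is 6 bytes, the name is at most
 * TSDB_DB_NAME_LEN - 1 bytes, "`.vgroups" is 9 bytes, plus 1 for the NUL,
 * i.e. TSDB_DB_NAME_LEN + 15 in total, so TSDB_DB_NAME_LEN + 16 suffices.
 * Note snprintf already reserves room for the NUL within its size argument,
 * so the sizeof(subsql) - 1 above is one byte more conservative than needed. */
char subsql[TSDB_DB_NAME_LEN + 16];
snprintf(subsql, sizeof(subsql), "show `%s`.vgroups", dbName);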
......@@ -794,8 +794,8 @@ static int32_t monGetVnodesTotalStats(char *ep, int32_t *totalVnodes,
for (int i = 0; i < num_fields; ++i) {
if (strcmp(fields[i].name, "status") == 0) {
int32_t charLen = monGetRowElemCharLen(fields[i], (char *)row[i]);
if (strncmp((char *)row[i], "master", charLen) == 0 ||
strncmp((char *)row[i], "slave", charLen) == 0) {
if (strncmp((char *)row[i], "leader", charLen) == 0 ||
strncmp((char *)row[i], "follower", charLen) == 0) {
*totalVnodesAlive += 1;
}
}
......@@ -1110,11 +1110,11 @@ static uint32_t monBuildVgroupsInfoSql(char *sql, char *dbName) {
int64_t ts = taosGetTimestampUs();
memset(sql, 0, SQL_LENGTH + 1);
snprintf(sql, SQL_LENGTH, "show %s.vgroups", dbName);
snprintf(sql, SQL_LENGTH, "show `%s`.vgroups", dbName);
TAOS_RES *result = taos_query(tsMonitor.conn, sql);
int32_t code = taos_errno(result);
if (code != TSDB_CODE_SUCCESS) {
monError("failed to execute cmd: show %s.vgroups, reason:%s", dbName, tstrerror(code));
monError("failed to execute cmd: show `%s`.vgroups, reason:%s", dbName, tstrerror(code));
}
TAOS_ROW row;
......
......@@ -2912,7 +2912,8 @@ static void percentile_finalizer(SQLFunctionCtx *pCtx) {
tMemBucket * pMemBucket = ppInfo->pMemBucket;
if (pMemBucket == NULL || pMemBucket->total == 0) { // check for null
assert(ppInfo->numOfElems == 0);
if (ppInfo->stage > 0)
assert(ppInfo->numOfElems == 0);
setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes);
} else {
SET_DOUBLE_VAL((double *)pCtx->pOutput, getPercentile(pMemBucket, v));
......
......@@ -1588,6 +1588,8 @@ static bool initGroupbyInfo(const SSDataBlock *pSDataBlock, const SGroupbyExpr *
return true;
}
pInfo->pGroupbyDataInfo = taosArrayInit(pGroupbyExpr->numOfGroupCols, sizeof(SGroupbyDataInfo));
// the buffer head stores the key length (int32_t)
pInfo->totalBytes = sizeof(int32_t);
for (int32_t k = 0; k < pGroupbyExpr->numOfGroupCols; ++k) {
SColIndex* pColIndex = taosArrayGet(pGroupbyExpr->columnInfo, k);
......@@ -1624,7 +1626,8 @@ static void buildGroupbyKeyBuf(const SSDataBlock *pSDataBlock, SGroupbyOperatorI
*buf = NULL;
return;
}
*buf = p;
*buf = p;
p += sizeof(int32_t);
for (int32_t i = 0; i < taosArrayGetSize(pInfo->pGroupbyDataInfo); i++) {
SGroupbyDataInfo *pDataInfo = taosArrayGet(pInfo->pGroupbyDataInfo, i);
......@@ -1646,26 +1649,22 @@ static void buildGroupbyKeyBuf(const SSDataBlock *pSDataBlock, SGroupbyOperatorI
memcpy(p, MULTI_KEY_DELIM, strlen(MULTI_KEY_DELIM));
p += strlen(MULTI_KEY_DELIM);
}
// calc keyLen and save
int32_t keyLen = (p - *buf) - sizeof(int32_t);
*(int32_t *)(*buf) = keyLen;
}
static bool isGroupbyKeyEqual(void *a, void *b, void *ext) {
SGroupbyOperatorInfo *pInfo = (SGroupbyOperatorInfo *)ext;
if (memcmp(a, b, pInfo->totalBytes) == 0) {
return true;
int32_t len1 = *(int32_t *)a;
int32_t len2 = *(int32_t *)b;
if (len1 != len2) {
return false;
}
int32_t offset = 0;
for (int32_t i = 0; i < taosArrayGetSize(pInfo->pGroupbyDataInfo); i++) {
SGroupbyDataInfo *pDataInfo = taosArrayGet(pInfo->pGroupbyDataInfo, i);
char *a1 = (char *)a + sizeof(int32_t);
char *b1 = (char *)b + sizeof(int32_t);
char *k1 = (char *)a + offset;
char *k2 = (char *)b + offset;
if (getComparFunc(pDataInfo->type, 0)(k1, k2) != 0) {
return false;
}
offset += pDataInfo->bytes;
offset += (int32_t)strlen(MULTI_KEY_DELIM);
}
return true;
return memcmp(a1, b1, len1) == 0;
}
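/* Layout introduced by this change (sketch):
 *   [int32_t keyLen][col1 bytes][MULTI_KEY_DELIM][col2 bytes][MULTI_KEY_DELIM]...
 * keyLen counts everything after the 4-byte header, so key equality reduces
 * to "same keyLen, then one memcmp over the bodies", and consumers such as
 * setGroupResultOutputBuf read the body at buf + sizeof(int32_t). */
static inline int32_t groupKeyLen(const char* buf) { return *(const int32_t*)buf; }
static inline const char* groupKeyBody(const char* buf) { return buf + sizeof(int32_t); }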
static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pInfo, SSDataBlock *pSDataBlock) {
......@@ -1708,7 +1707,9 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn
setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, pInfo);
}
int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, pInfo->prevData, type, pInfo->totalBytes, item->groupIndex);
char *preKey = pInfo->prevData + sizeof(int32_t);
int32_t keyLen = *(int32_t *)pInfo->prevData;
int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, preKey, type, keyLen, item->groupIndex);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR);
}
......@@ -1730,7 +1731,9 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn
if (pQueryAttr->stableQuery && pQueryAttr->stabledev && (pRuntimeEnv->prevResult != NULL)) {
setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, pInfo);
}
int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, pInfo->prevData, type, pInfo->totalBytes, item->groupIndex);
char *preKey = pInfo->prevData + sizeof(int32_t);
int32_t keyLen = *(int32_t *)pInfo->prevData;
int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, preKey, type, keyLen, item->groupIndex);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR);
}
......@@ -4312,14 +4315,15 @@ void setParamForStableStddevByColData(SQueryRuntimeEnv* pRuntimeEnv, SQLFunction
// find colid in dataBlock
int32_t bytes, offset = 0;
char* val = NULL;
char* prevData = pInfo->prevData + sizeof(int32_t); // skip the key-length header (int32_t)
for (int32_t idx = 0; idx < taosArrayGetSize(pInfo->pGroupbyDataInfo); idx++) {
SGroupbyDataInfo *pDataInfo = taosArrayGet(pInfo->pGroupbyDataInfo, idx);
if (pDataInfo->index == pExpr1->colInfo.colId) {
bytes = pDataInfo->bytes;
val = pInfo->prevData + offset;
val = prevData + offset;
break;
}
offset += pDataInfo->bytes;
offset += pDataInfo->bytes + strlen(MULTI_KEY_DELIM); // values are separated by MULTI_KEY_DELIM
}
if (val == NULL) { continue; }
......
......@@ -68,8 +68,8 @@ char* syncRole[] = {
"offline",
"unsynced",
"syncing",
"slave",
"master"
"follower",
"leader"
};
char *syncStatus[] = {
......
......@@ -986,7 +986,7 @@ static SMemRow getSMemRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order,
return rmem;
} else {
pCheckInfo->chosen = CHECKINFO_CHOSEN_BOTH;
extraRow = rimem;
*extraRow = rimem;
return rmem;
}
} else {
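/* Generic illustration of the bug fixed above (assuming SMemRow is a
 * pointer typedef, as the surrounding code suggests): assigning to the
 * parameter itself only rebinds the callee's local copy. */
static SMemRow pick(SMemRow rmem, SMemRow rimem, SMemRow *extraRow) {
  /* extraRow = rimem;   old bug: the caller's extraRow is never written */
  *extraRow = rimem;     /* fix: store through the out-parameter */
  return rmem;
}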
......@@ -1298,7 +1298,7 @@ static int32_t offsetSkipBlock(STsdbQueryHandle* q, SBlockInfo* pBlockInfo, int6
range.from = i;
}
}
range.to = 0;
range.to = sblock;
taosArrayPush(pArray, &range);
range.from = -1;
break;
......@@ -1314,7 +1314,7 @@ static int32_t offsetSkipBlock(STsdbQueryHandle* q, SBlockInfo* pBlockInfo, int6
if(range.from == -1) {
range.from = i;
} else {
if(range.to + 1 != i) {
if(range.to - 1 != i) {
// add the previous
taosArrayPush(pArray, &range);
range.from = i;
......@@ -1359,16 +1359,17 @@ static void shrinkBlocksByQuery(STsdbQueryHandle *pQueryHandle, STableCheckInfo
SBlockIdx *compIndex = pQueryHandle->rhelper.pBlkIdx;
bool order = ASCENDING_TRAVERSE(pQueryHandle->order);
TSKEY s = TSKEY_INITIAL_VAL, e = TSKEY_INITIAL_VAL;
if (order) {
assert(pCheckInfo->lastKey <= pQueryHandle->window.ekey && pQueryHandle->window.skey <= pQueryHandle->window.ekey);
s = pQueryHandle->window.skey;
e = pQueryHandle->window.ekey;
} else {
assert(pCheckInfo->lastKey >= pQueryHandle->window.ekey && pQueryHandle->window.skey >= pQueryHandle->window.ekey);
e = pQueryHandle->window.skey;
s = pQueryHandle->window.ekey;
}
TSKEY s = TSKEY_INITIAL_VAL, e = TSKEY_INITIAL_VAL;
s = MIN(pCheckInfo->lastKey, pQueryHandle->window.ekey);
e = MAX(pCheckInfo->lastKey, pQueryHandle->window.ekey);
// discard the unqualified data block based on the query time window
int32_t start = binarySearchForBlock(pCompInfo->blocks, compIndex->numOfBlocks, s, TSDB_ORDER_ASC);
if (s > pCompInfo->blocks[start].keyLast) {
......
......@@ -123,6 +123,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TSC_VALUE_OUT_OF_RANGE, "Value out of range")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_PROTOCOL_TYPE, "Invalid line protocol type")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_PRECISION_TYPE, "Invalid timestamp precision type")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_RES_TOO_MANY, "Result set too large to be output")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_TOO_MANY_SML_LINES, "Too many lines in batch")
// mnode
TAOS_DEFINE_ERROR(TSDB_CODE_MND_MSG_NOT_PROCESSED, "Message not processed")
......
......@@ -807,6 +807,12 @@
4,,pytest,python3 test.py -f insert/line_insert.py
3,,pytest,python3 test.py -f tag_lite/binary.py
3,,pytest,python3 test.py -f query/filterAllIntTypes.py
3,,pytest,python3 test.py -f dbmgmt/dbNameCaseSensitive.py
3,,pytest,python3 test.py -f insert/schemalessCaseSensitive.py
3,,pytest,python3 test.py -f table/columnNameCaseSensitive.py
3,,pytest,python3 test.py -f table/columnNameValidation.py
3,,pytest,python3 test.py -f table/tagNameCaseSensitive.py
3,,pytest,python3 test.py -f table/tbNameCaseSensitive.py
3,,develop-test,python3 ./test.py -f 2-query/ts_hidden_column.py
3,,develop-test,python3 ./test.py -f 2-query/ts_shortcut.py
3,,develop-test,python3 ./test.py -f 2-query/nchar_funcs.py
......
......@@ -7,10 +7,11 @@ function usage() {
echo -e "\t -b branch"
echo -e "\t -l log dir"
echo -e "\t -o default timeout value"
echo -e "\t -w log web server"
echo -e "\t -h help"
}
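# Example invocation with the new option (hypothetical paths and host):
#   ./run.sh -m config.ini -t cases.task -b master -w http://logs.example.com
# When -w is set, the failed-case summary prints ${web_server}/<log dir>/<case>.txt
# links instead of local log file paths (see the failure handling further down).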
while getopts "m:t:b:l:o:h" opt; do
while getopts "m:t:b:l:o:w:h" opt; do
case $opt in
m)
config_file=$OPTARG
......@@ -27,6 +28,9 @@ while getopts "m:t:b:l:o:h" opt; do
o)
timeout_param="-o $OPTARG"
;;
w)
web_server=$OPTARG
;;
h)
usage
exit 0
......@@ -59,10 +63,11 @@ if [ ! -f $t_file ]; then
exit 1
fi
date_tag=`date +%Y%m%d-%H%M%S`
test_log_dir=${branch}_${date_tag}
if [ -z $log_dir ]; then
log_dir="log/${branch}_${date_tag}"
log_dir="log/${test_log_dir}"
else
log_dir="$log_dir/${branch}_${date_tag}"
log_dir="$log_dir/${test_log_dir}"
fi
hosts=()
......@@ -134,14 +139,14 @@ function build_src() {
echo "$cmd"
${cmd}
if [ $? -ne 0 ]; then
flock -x $lock_file -c "echo \"${hosts[index]} TDengine build failed\" >>$log_dir/failed.log"
flock -x $lock_file -c "echo \"${hosts[index]} TDengine build failed\" >>${failed_case_file}"
return
fi
script=". ~/.bashrc;cd ${workdirs[index]}/taos-tools;git submodule update --init --recursive;mkdir -p build;cd build;cmake ..;make -j4"
cmd="${ssh_script} sh -c \"$script\""
${cmd}
if [ $? -ne 0 ]; then
flock -x $lock_file -c "echo \"${hosts[index]} taos-tools build failed\" >>$log_dir/failed.log"
flock -x $lock_file -c "echo \"${hosts[index]} taos-tools build failed\" >>${failed_case_file}"
return
fi
script="cp -rf ${workdirs[index]}/taos-tools/build/build/bin/* ${workdirs[index]}/TDinternal/debug/build/bin/;cp -rf ${workdirs[index]}/taos-tools/build/build/lib/* ${workdirs[index]}/TDinternal/debug/build/lib/;cp -rf ${workdirs[index]}/taos-tools/build/build/lib64/* ${workdirs[index]}/TDinternal/debug/build/lib/;cp -rf ${workdirs[index]}/TDinternal/debug/build/bin/taosBenchmark ${workdirs[index]}/TDinternal/debug/build/bin/taosdemo"
......@@ -191,6 +196,10 @@ function run_thread() {
local exec_dir=`echo "$line"|cut -d, -f3`
local case_cmd=`echo "$line"|cut -d, -f4`
local case_file=""
echo "$case_cmd"|grep -q "\.sh"
if [ $? -eq 0 ]; then
case_file=`echo "$case_cmd"|grep -o ".*\.sh"|awk '{print $NF}'`
fi
echo "$case_cmd"|grep -q "^python3"
if [ $? -eq 0 ]; then
case_file=`echo "$case_cmd"|grep -o ".*\.py"|awk '{print $NF}'`
......@@ -215,44 +224,54 @@ function run_thread() {
# echo "$thread_no $count $cmd"
local ret=0
local redo_count=1
local case_log_file=$log_dir/${case_file}.txt
start_time=`date +%s`
local case_index=`flock -x $lock_file -c "sh -c \"echo \\\$(( \\\$( cat $index_file ) + 1 )) | tee $index_file\""`
case_index=`printf "%5d" $case_index`
local case_info=`echo "$line"|cut -d, -f 3,4`
while [ ${redo_count} -lt 6 ]; do
if [ -f $log_dir/$case_file.log ]; then
cp $log_dir/$case_file.log $log_dir/$case_file.${redo_count}.redolog
if [ -f $case_log_file ]; then
cp $case_log_file $log_dir/$case_file.${redo_count}.redotxt
fi
echo "${hosts[index]}-${thread_no} order:${count}, redo:${redo_count} task:${line}" >$log_dir/$case_file.log
echo -e "\e[33m >>>>> \e[0m ${case_cmd}"
date >>$log_dir/$case_file.log
# $cmd 2>&1 | tee -a $log_dir/$case_file.log
echo "${hosts[index]}-${thread_no} order:${count}, redo:${redo_count} task:${line}" >$case_log_file
local current_time=`date "+%Y-%m-%d %H:%M:%S"`
echo -e "$case_index \e[33m START >>>>> \e[0m ${case_info} \e[33m[$current_time]\e[0m"
echo "$current_time" >>$case_log_file
local real_start_time=`date +%s`
# $cmd 2>&1 | tee -a $case_log_file
# ret=${PIPESTATUS[0]}
$cmd >>$log_dir/$case_file.log 2>&1
$cmd >>$case_log_file 2>&1
ret=$?
echo "${hosts[index]} `date` ret:${ret}" >>$log_dir/$case_file.log
local real_end_time=`date +%s`
local time_elapsed=$(( real_end_time - real_start_time ))
echo "execute time: ${time_elapsed}s" >>$case_log_file
current_time=`date "+%Y-%m-%d %H:%M:%S"`
echo "${hosts[index]} $current_time exit code:${ret}" >>$case_log_file
if [ $ret -eq 0 ]; then
break
fi
redo=0
grep -q "wait too long for taosd start" $log_dir/$case_file.log
grep -q "wait too long for taosd start" $case_log_file
if [ $? -eq 0 ]; then
redo=1
fi
grep -q "kex_exchange_identification: Connection closed by remote host" $log_dir/$case_file.log
grep -q "kex_exchange_identification: Connection closed by remote host" $case_log_file
if [ $? -eq 0 ]; then
redo=1
fi
grep -q "ssh_exchange_identification: Connection closed by remote host" $log_dir/$case_file.log
grep -q "ssh_exchange_identification: Connection closed by remote host" $case_log_file
if [ $? -eq 0 ]; then
redo=1
fi
grep -q "kex_exchange_identification: read: Connection reset by peer" $log_dir/$case_file.log
grep -q "kex_exchange_identification: read: Connection reset by peer" $case_log_file
if [ $? -eq 0 ]; then
redo=1
fi
grep -q "Database not ready" $log_dir/$case_file.log
grep -q "Database not ready" $case_log_file
if [ $? -eq 0 ]; then
redo=1
fi
grep -q "Unable to establish connection" $log_dir/$case_file.log
grep -q "Unable to establish connection" $case_log_file
if [ $? -eq 0 ]; then
redo=1
fi
......@@ -265,11 +284,18 @@ function run_thread() {
redo_count=$(( redo_count + 1 ))
done
end_time=`date +%s`
echo >>$log_dir/$case_file.log
echo "${hosts[index]} execute time: $(( end_time - start_time ))s" >>$log_dir/$case_file.log
echo >>$case_log_file
total_time=$(( end_time - start_time ))
echo "${hosts[index]} total time: ${total_time}s" >>$case_log_file
# echo "$thread_no ${line} DONE"
if [ $ret -ne 0 ]; then
flock -x $lock_file -c "echo \"${hosts[index]} ret:${ret} ${line}\" >>$log_dir/failed.log"
if [ $ret -eq 0 ]; then
echo -e "$case_index \e[34m DONE <<<<< \e[0m ${case_info} \e[34m[${total_time}s]\e[0m \e[32m success\e[0m"
else
if [ ! -z "${web_server}" ]; then
flock -x $lock_file -c "echo -e \"${hosts[index]} ret:${ret} ${line}\n ${web_server}/$test_log_dir/${case_file}.txt\" >>${failed_case_file}"
else
flock -x $lock_file -c "echo -e \"${hosts[index]} ret:${ret} ${line}\n log file: ${case_log_file}\" >>${failed_case_file}"
fi
mkdir -p $log_dir/${case_file}.coredump
local remote_coredump_dir="${workdirs[index]}/tmp/thread_volume/$thread_no/coredump"
local scpcmd="sshpass -p ${passwords[index]} scp -o StrictHostKeyChecking=no -r ${usernames[index]}@${hosts[index]}"
......@@ -278,14 +304,16 @@ function run_thread() {
fi
cmd="$scpcmd:${remote_coredump_dir}/* $log_dir/${case_file}.coredump/"
$cmd # 2>/dev/null
local case_info=`echo "$line"|cut -d, -f 3,4`
local corefile=`ls $log_dir/${case_file}.coredump/`
corefile=`find $log_dir/${case_file}.coredump/ -name "core.*"`
echo -e "$case_info \e[31m failed\e[0m"
corefile=`find $log_dir/${case_file}.coredump/ -name "core*"`
echo -e "$case_index \e[34m DONE <<<<< \e[0m ${case_info} \e[34m[${total_time}s]\e[0m \e[31m failed\e[0m"
echo "=========================log============================"
cat $log_dir/$case_file.log
cat $case_log_file
echo "====================================================="
echo -e "\e[34m log file: $log_dir/$case_file.log \e[0m"
echo -e "\e[34m log file: $case_log_file \e[0m"
if [ ! -z "${web_server}" ]; then
echo "${web_server}/$test_log_dir/${case_file}.txt"
fi
if [ ! -z "$corefile" ]; then
echo -e "\e[34m corefiles: $corefile \e[0m"
local build_dir=$log_dir/build_${hosts[index]}
......@@ -320,6 +348,10 @@ mkdir -p $log_dir
rm -rf $log_dir/*
task_file=$log_dir/$$.task
lock_file=$log_dir/$$.lock
index_file=$log_dir/case_index.txt
stat_file=$log_dir/stat.txt
failed_case_file=$log_dir/failed.txt
echo "0" >$index_file
i=0
while [ $i -lt ${#hosts[*]} ]; do
......@@ -328,10 +360,6 @@ while [ $i -lt ${#hosts[*]} ]; do
i=$(( i + 1 ))
done
wait
# if [ -f "$log_dir/failed.log" ]; then
# cat $log_dir/failed.log
# exit 1
# fi
i=0
j=0
......@@ -357,15 +385,45 @@ rm -f $lock_file
rm -f $task_file
# docker ps -a|grep -v CONTAINER|awk '{print $1}'|xargs docker rm -f
echo "====================================================================="
echo "log dir: $log_dir"
total_cases=`cat $index_file`
failed_cases=0
if [ -f $failed_case_file ]; then
if [ ! -z "$web_server" ]; then
failed_cases=`grep -v "$web_server" $failed_case_file|wc -l`
else
failed_cases=`grep -v "log file:" $failed_case_file|wc -l`
fi
fi
success_cases=$(( total_cases - failed_cases ))
echo "Total Cases: $total_cases" >$stat_file
echo "Successful: $success_cases" >>$stat_file
echo "Failed: $failed_cases" >>$stat_file
cat $stat_file
RET=0
i=1
if [ -f "$log_dir/failed.log" ]; then
if [ -f "${failed_case_file}" ]; then
echo "====================================================="
while read line; do
if [ ! -z "${web_server}" ]; then
echo "$line"|grep -q "${web_server}"
if [ $? -eq 0 ]; then
echo " $line"
continue
fi
else
echo "$line"|grep -q "log file:"
if [ $? -eq 0 ]; then
echo " $line"
continue
fi
fi
line=`echo "$line"|cut -d, -f 3,4`
echo -e "$i. $line \e[31m failed\e[0m" >&2
i=$(( i + 1 ))
done <$log_dir/failed.log
done <${failed_case_file}
RET=1
fi
......
......@@ -50,7 +50,7 @@ class TDTestCase:
ret = tdSql.query('show mnodes')
tdSql.checkRows(1)
tdSql.checkData(0, 2, "master")
tdSql.checkData(0, 2, "leader")
role_time = tdSql.getData(0, 3)
create_time = tdSql.getData(0, 4)
......@@ -73,7 +73,7 @@ class TDTestCase:
ret = tdSql.query('show vnodes "{}"'.format(dnodeEndpoint))
tdSql.checkRows(1)
tdSql.checkData(0, 0, 2)
tdSql.checkData(0, 1, "master")
tdSql.checkData(0, 1, "leader")
cmd = "taos -h 127.0.0.1 -s 'show databases'"
r = os.popen(cmd)
......
......@@ -30,22 +30,22 @@ class ClusterTestcase:
tdSql.execute("use %s" % ctest.dbName)
tdSql.query("show vgroups")
for i in range(10):
tdSql.checkData(i, 5, "master")
tdSql.checkData(i, 5, "leader")
tdSql.execute("alter database %s replica 2" % ctest.dbName)
tdLog.sleep(30)
tdSql.query("show vgroups")
for i in range(10):
tdSql.checkData(i, 5, "master")
tdSql.checkData(i, 7, "slave")
tdSql.checkData(i, 5, "leader")
tdSql.checkData(i, 7, "follower")
tdSql.execute("alter database %s replica 3" % ctest.dbName)
tdLog.sleep(30)
tdSql.query("show vgroups")
for i in range(10):
tdSql.checkData(i, 5, "master")
tdSql.checkData(i, 7, "slave")
tdSql.checkData(i, 9, "slave")
tdSql.checkData(i, 5, "leader")
tdSql.checkData(i, 7, "follower")
tdSql.checkData(i, 9, "follower")
ct = ClusterTestcase()
ct.run()
\ No newline at end of file
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self._conn = conn
def run(self):
# database name
tdSql.execute("create database db")
tdSql.query("show databases")
tdSql.checkRows(1)
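# Unquoted names are lowercased, so "Db" collides with the existing "db";
# backquoted names keep their case: `db` collides, while `Db` is new.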
tdSql.error("create database Db")
tdSql.error("create database `db`")
tdSql.execute("create database `Db`")
tdSql.query("show databases")
tdSql.checkRows(2)
tdSql.execute("alter database db cachelast 1")
tdSql.execute("alter database `Db` cachelast 1")
tdSql.execute("use db")
tdSql.query("select database()")
tdSql.checkData(0, 0, 'db')
tdSql.query("show db.vgroups")
tdSql.checkRows(0)
tdSql.execute("use `Db`")
tdSql.query("select database()")
tdSql.checkData(0, 0, 'Db')
tdSql.query("show `Db`.vgroups")
tdSql.checkRows(0)
tdSql.query("show create database `Db`")
tdSql.checkRows(1)
sql = tdSql.getData(0, 1)
tdSql.checkEqual(True, sql.startswith("CREATE DATABASE `Db`"))
tdSql.execute("drop database db")
tdSql.execute("drop database `Db`")
tdSql.query("show databases")
tdSql.checkRows(0)
# corner cases
tdSql.execute("create database `电力系统`")
tdSql.query("show `电力系统`.vgroups")
tdSql.checkRows(0)
tdSql.query("show databases")
tdSql.checkRows(1)
tdSql.checkData(0, 0, "电力系统")
tdSql.query("show create database `电力系统`")
sql = tdSql.getData(0, 1)
tdSql.checkEqual(True, sql.startswith("CREATE DATABASE `电力系统`"))
tdSql.error("create database ``")
tdSql.execute("create database ` `")
tdSql.error("create database ` `")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
from util.log import *
from util.cases import *
from util.sql import *
from util.types import TDSmlProtocolType, TDSmlTimestampType
import json
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self._conn = conn
def run(self):
# influxDB Line Protocol
self.influxDBLineProtocol()
# OpenTSDB Line Protocol
self.openTSDBLineProtocol()
# OpenTSDB JSON Protocol
self.openTSDBJSONProtocol()
def influxDBLineProtocol(self):
print("===== influxDB Line Protocol Case Sensitive Test =====\n")
tdSql.execute("create database influxdb precision 'ns' ")
tdSql.execute("use influxdb")
lines = [
"St,deviceId=1i voltage=1,phase=\"Test\" 1626006833639000000",
"St,DeviceId=3i voltage=2,phase=\"Test\" 1626006833639000000",
"St,deviceId=2i,DeviceId=3 Voltage=2,Phase=\"Test2\" 1626006833639000000",
"St,deviceId=4i,DeviceId=3 voltage=1,phase=\"Test\",Voltage=2,Phase=\"Test1\" 1626006833639000000",
"tbl,deviceId=\"sensor0\" Hello=3i 1646053743694400029",
"tbl,deviceId=\"sensor0\" n=3i,N=4i 1646053743694400030",
"tbl,deviceId=\"sensor0\" g=3i 1646053743694400031",
"tbl,deviceId=\"sensor0\" G=3i 1646053743694400032",
"tbl,deviceId=\"sensor0\" nice=2i,Nice=3i 1646053743694400033",
"tbl,deviceId=\"sensor0\" hello=3i 1646053743694400034",
"超级表,deviceId=\"sensor0\" 电压=3i 1646053743694400035",
]
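# Expected below: 3 super tables (St, tbl, 超级表) and 6 child tables, since
# case-differing tag/field names (deviceId vs DeviceId, n vs N, g vs G)
# become distinct columns rather than merging.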
self._conn.schemaless_insert(lines, TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
tdSql.query("show stables")
tdSql.checkRows(3)
tdSql.query("show tables")
tdSql.checkRows(6)
tdSql.query("describe `St`")
tdSql.checkRows(7)
tdSql.query("select * from `St`")
tdSql.checkRows(4)
tdSql.query("select * from tbl")
tdSql.checkRows(6)
tdSql.query("select * from `超级表`")
tdSql.checkRows(1)
def openTSDBLineProtocol(self):
print("===== OpenTSDB Line Protocol Case Sensitive Test =====\n")
tdSql.execute("create database opentsdbline")
tdSql.execute("use opentsdbline")
# format: <metric> <timestamp> <value> <tagk_1>=<tagv_1>[ <tagk_n>=<tagv_n>]
lines = [
"meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2",
"meters.Current 1648432611250 12.6 location=California.SanFrancisco groupid=2",
"meters.Current 1648432611249 10.8 Location=California.LosAngeles groupid=3",
"meters.Current 1648432611249 10.8 Location=California.LosAngeles location=California.SanFrancisco groupid=3",
"Meters.current 1648432611250 11.3 location=California.LosAngeles Groupid=3",
"电表 1648432611250 11.3 位置=California.LosAngeles Groupid=3"
]
self._conn.schemaless_insert(lines, TDSmlProtocolType.TELNET.value, None)
tdSql.query("show stables")
tdSql.checkRows(4)
tdSql.query("show tables")
tdSql.checkRows(6)
tdSql.query("describe `meters.Current`")
tdSql.checkRows(5)
tdSql.checkData(2, 0, "groupid")
tdSql.checkData(3, 0, "location")
tdSql.checkData(4, 0, "Location")
tdSql.query("describe `Meters.current`")
tdSql.checkRows(4)
tdSql.checkData(2, 0, "Groupid")
tdSql.checkData(3, 0, "location")
tdSql.query("describe `电表`")
tdSql.checkRows(4)
tdSql.checkData(2, 0, "Groupid")
tdSql.checkData(3, 0, "位置")
def openTSDBJSONProtocol(self):
print("===== OpenTSDB JSON Protocol Case Sensitive Test =====\n")
tdSql.execute("create database opentsdbjson")
tdSql.execute("use opentsdbjson")
lines = [
{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "California.SanFrancisco", "groupid": 2}},
{"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, "tags": {"Location": "California.LosAngeles", "groupid": 1}},
{"metric": "meters.Current", "timestamp": 1648432611250, "value": 12.6, "tags": {"location": "California.SanFrancisco", "groupid": 2}},
{"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "California.LosAngeles", "groupid": 1}},
{"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "California.LosAngeles", "Location": "California.SanFrancisco", "groupid": 2}},
{"metric": "电压", "timestamp": 1648432611250, "value": 221, "tags": {"位置": "California.LosAngeles", "groupid": 1}}
]
self._conn.schemaless_insert([json.dumps(lines)], TDSmlProtocolType.JSON.value, None)
tdSql.query("show stables")
tdSql.checkRows(4)
tdSql.query("show tables")
tdSql.checkRows(6)
tdSql.query("describe `meters.Current`")
tdSql.checkRows(4)
tdSql.query("describe `meters.voltage`")
tdSql.checkRows(5)
tdSql.checkData(3, 0, "Location")
tdSql.checkData(4, 0, "location")
tdSql.query("describe `电压`")
tdSql.checkRows(4)
tdSql.checkData(3, 0, "位置")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
......@@ -53,7 +53,7 @@ class TwoClients:
tdSql.query("show mnodes")
tdSql.checkRows(3)
roles = "master slave"
roles = "leader follower"
for i in range(tdSql.queryRows):
if (tdSql.queryResult[i][2] in roles ):
ep = tdSql.queryResult[i][1]
......
......@@ -88,6 +88,23 @@ class TDTestCase:
tdSql.query("select count(*) from tb group by c1")
tdSql.checkRows(0)
# TS-1619
tdSql.execute("create database test")
tdSql.execute("use test")
tdSql.execute("create table stb(ts timestamp, c1 int, c2 nchar(30)) tags(t1 int)")
for i in range(3):
tdSql.execute("create table t%d using stb tags(%d)" % (i, i))
sql = "insert into t%d values " % i
for j in range(16):
if j % 4 == 0:
s = '00'
else:
s = str(j % 4 * 15)
sql += "(%d, %d, '2022-06-01 0%d:%s')" % (self.ts + j, i, int(j / 4), s)
tdSql.execute(sql)
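# c2 cycles through 16 distinct values ('2022-06-01 00:00' .. '03:45' in
# 15-minute steps) shared by all three child tables, so grouping by c2
# must return exactly 16 rows.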
tdSql.query("select c2, sum(c1) from stb group by c2")
tdSql.checkRows(16)
def stop(self):
tdSql.close()
......
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
from util.log import *
from util.cases import *
from util.sql import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
tdSql.prepare()
# column
tdSql.execute("create table tb(ts timestamp, c1 int)")
tdSql.execute("create table `TB`(ts timestamp, c1 int)")
tdSql.error("alter table tb add column C1 int")
tdSql.execute("alter table tb add column `C1` int")
tdSql.error("alter table `TB` add column C1 int")
tdSql.execute("alter table `TB` add column `C1` int")
tdSql.error("create table tb2(ts timestamp, c1 int, C1 int)")
tdSql.execute("create table tb2(ts timestamp, c1 int, `C1` int)")
tdSql.query("describe tb2")
tdSql.checkRows(3)
tdSql.checkData(0, 0, 'ts')
tdSql.checkData(1, 0, 'c1')
tdSql.checkData(2, 0, 'C1')
tdSql.execute("insert into tb2(ts, c1) values(now, 1)")
tdSql.execute("insert into tb2(ts, `C1`) values(now + 1s, 1)")
tdSql.execute("insert into tb2(ts, c1, `C1`) values(now + 2s, 1, 2)")
tdSql.query("select * from tb2")
tdSql.checkRows(3)
tdSql.query("select * from tb2 where c1 = 1")
tdSql.checkRows(2)
tdSql.query("select * from tb2 where `C1` = 1")
tdSql.checkRows(1)
tdSql.query("select c1 `C1` from tb2 where `C1` = 1")
tdSql.checkRows(1)
tdSql.query("select c1 as `C1` from tb2 where `C1` = 1")
tdSql.checkRows(1)
tdSql.query("select `C1` a from tb2 where `C1` = 1")
tdSql.checkRows(1)
tdSql.query("select `C1` as a from tb2 where `C1` = 1")
tdSql.checkRows(1)
tdSql.execute("alter table tb2 drop column c1")
tdSql.query("describe tb2")
tdSql.checkRows(2)
tdSql.error("create table `TB2`(ts timestamp, c1 int, C1 int)")
tdSql.execute("create table `TB2`(ts timestamp, c1 int, `C1` int)")
tdSql.query("describe `TB2`")
tdSql.checkRows(3)
tdSql.checkData(0, 0, 'ts')
tdSql.checkData(1, 0, 'c1')
tdSql.checkData(2, 0, 'C1')
tdSql.execute("insert into `TB2`(ts, c1) values(now, 1)")
tdSql.execute("insert into `TB2`(ts, `C1`) values(now + 1s, 1)")
tdSql.execute("insert into `TB2`(ts, c1, `C1`) values(now + 2s, 1, 2)")
tdSql.query("select * from `TB2`")
tdSql.checkRows(3)
tdSql.query("select * from `TB2` where c1 = 1")
tdSql.checkRows(2)
tdSql.query("select * from `TB2` where `C1` = 1")
tdSql.checkRows(1)
tdSql.query("select c1 `C1` from `TB2` where `C1` = 1")
tdSql.checkRows(1)
tdSql.query("select c1 as `C1` from `TB2` where `C1` = 1")
tdSql.checkRows(1)
tdSql.query("select `C1` a from `TB2` where `C1` = 1")
tdSql.checkRows(1)
tdSql.query("select `C1` as a from `TB2` where `C1` = 1")
tdSql.checkRows(1)
tdSql.execute("alter table `TB2` drop column `C1`")
tdSql.query("describe tb2")
tdSql.checkRows(2)
tdSql.error("create table `STB2`(ts timestamp, c1 int, C1 int) tags (t1 int)")
tdSql.execute("create table `STB2`(ts timestamp, c1 int, `C1` int) tags (t1 int)")
tdSql.query("describe `STB2`")
tdSql.checkRows(4)
tdSql.checkData(0, 0, 'ts')
tdSql.checkData(1, 0, 'c1')
tdSql.checkData(2, 0, 'C1')
tdSql.checkData(3, 0, 't1')
tdSql.execute("insert into tt2(ts, c1) using `STB2` tags(1) values(now, 1)")
tdSql.execute("insert into tt2(ts, `C1`) using `STB2` tags(1) values(now + 1s, 1)")
tdSql.execute("insert into tt2(ts, c1, `C1`) using `STB2` tags(1) values(now + 2s, 1, 2)")
tdSql.query("select * from `STB2`")
tdSql.checkRows(3)
tdSql.query("select * from `STB2` where c1 = 1")
tdSql.checkRows(2)
tdSql.query("select * from `STB2` where `C1` = 1")
tdSql.checkRows(1)
tdSql.query("select c1 `C1` from `STB2` where `C1` = 1")
tdSql.checkRows(1)
tdSql.query("select c1 as `C1` from `STB2` where `C1` = 1")
tdSql.checkRows(1)
tdSql.query("select `C1` a from `STB2` where `C1` = 1")
tdSql.checkRows(1)
tdSql.query("select `C1` as a from `STB2` where `C1` = 1")
tdSql.checkRows(1)
tdSql.query("show create table `STB2`")
tdSql.checkData(0, 1, "CREATE TABLE `STB2` (`ts` TIMESTAMP,`c1` INT,`C1` INT) TAGS (`t1` INT)")
tdSql.execute("alter table `STB2` drop column `C1`")
tdSql.query("describe tb2")
tdSql.checkRows(2)
# corner cases
tdSql.execute("alter table `STB2` add column `数量` int")
tdSql.execute("insert into tt3(ts, `数量`) using `STB2` tags(2) values(now + 3s, 1)")
tdSql.query("show create table `STB2`")
tdSql.checkData(0, 1, "CREATE TABLE `STB2` (`ts` TIMESTAMP,`c1` INT,`数量` INT) TAGS (`t1` INT)")
tdSql.query("select * from tt3")
tdSql.checkRows(1)
tdSql.query("select ts `TS` from tt3")
tdSql.checkRows(1)
tdSql.query("select ts as `TS` from tt3")
tdSql.checkRows(1)
tdSql.query("select ts as `时间戳` from tt3")
tdSql.checkRows(1)
tdSql.query("select ts `时间戳` from tt3")
tdSql.checkRows(1)
tdSql.error("create table tt4(`` timestamp, c1 int)")
tdSql.error("create table tt4(` ` timestamp, ` ` int)")
tdSql.error("create table tt4(`tb1` timestamp, `tb1` int)")
ts = 1656040651000
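# 1656040651000 ms is 2022-06-24 11:17:31.000 (UTC+8), matched by the delete below.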
tdSql.execute("create table `T4`(` ` timestamp, c1 int, `C1` int)")
tdSql.execute("insert into `T4`(` `, `C1`) values(%d, 1)" % ts)
tdSql.query("select * from `T4`")
tdSql.checkRows(1)
tdSql.execute("delete from `T4` where ` ` = '2022-06-24 11:17:31.000'")
tdSql.query("select * from `T4`")
tdSql.checkRows(0)
tdSql.error("alter table `T4` add column `` double")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
# -*- coding: utf-8 -*-
import sys
import string
import random
import subprocess
from util.log import *
from util.cases import *
from util.sql import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
tdSql.query('show tables')
tdSql.checkRows(0)
# uniqueness
tdSql.error("create table t (t timestamp, f int, F int)")
tdSql.error("create table t (t timestamp, `f` int, F int)")
tdSql.error("create table t (t timestamp, `f` int, `f` int)")
tdSql.execute("create table t (t timestamp, `f` int, `F` int)")
tdSql.query("show tables")
tdSql.checkRows(1)
tdSql.execute("drop table t")
tdSql.error("create table t (t timestamp, f int, `F` int) tags (T int)")
tdSql.error("create table t (t timestamp, f int, `F` int) tags (`T` int, `T` int)")
tdSql.execute("create table t (t timestamp, f int, `F` int) tags (`T` int)")
tdSql.query("show stables")
tdSql.checkRows(1)
tdSql.execute("drop table t")
# non-emptiness
tdSql.error("create table t (t timestamp, `` int)")
tdSql.error("create table t (t timestamp, `f` int) tags (`` int)")
tdSql.query("show tables")
tdSql.checkRows(0)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
from util.log import *
from util.cases import *
from util.sql import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self._conn = conn
def run(self):
tdSql.prepare()
# tag
tdSql.error("create table `STB3`(ts timesatmp, c1 int) tags(t1 int, T1 int)")
tdSql.execute("create table `STB3`(ts timestamp, c1 int) tags(t1 int)")
tdSql.execute("alter table `STB3` add tag `T1` int")
tdSql.execute("create table `STB4`(ts timestamp, c1 int) tags(t1 int, `T1` int)")
tdSql.execute("create table tt3 using `STB3`(t1) tags(1)")
tdSql.execute("create table tt4 using `STB3`(`T1`) tags(1)")
tdSql.query("select t1, `T1` from `STB3`")
tdSql.checkRows(2)
tdSql.query("show create table `STB3`")
tdSql.checkData(0, 1, "CREATE TABLE `STB3` (`ts` TIMESTAMP,`c1` INT) TAGS (`t1` INT,`T1` INT)")
tdSql.execute("alter table `STB3` drop tag `T1`")
tdSql.query("describe `STB3`")
tdSql.checkRows(3)
# corner case
tdSql.execute("create table `STB5`(ts timestamp, c1 int) tags(t1 int, `标签` int)")
tdSql.execute("insert into `测试` using `STB5` tags(1, 1) values(now, 1)")
tdSql.query("show create table `STB5`")
tdSql.checkData(0, 1, "CREATE TABLE `STB5` (`ts` TIMESTAMP,`c1` INT) TAGS (`t1` INT,`标签` INT)")
tdSql.query("select * from `测试`")
tdSql.checkRows(1)
tdSql.query("select `标签` t from `测试`")
tdSql.checkRows(1)
tdSql.execute("alter table `STB5` add tag `标签2` double")
tdSql.query("describe `STB5`")
tdSql.checkRows(5)
ts = 1656040651000
tdSql.error("create table `STB6`(ts timestamp, c1 int) tags(`` int)")
tdSql.error("create table `STB6`(ts timestamp, c1 int) tags(` ` int, ` ` binary(20))")
tdSql.execute("create table `STB6`(ts timestamp, c1 int) tags(` ` int)")
tdSql.execute("insert into tb6 using `STB6` tags(1) values(%d, 1)(%d, 2)(%d, 3)" % (ts, ts + 1000, ts + 2000))
tdSql.execute("insert into tb7 using `STB6` tags(2) values(%d, 1)(%d, 2)(%d, 3)" % (ts, ts + 1000, ts + 2000))
tdSql.query("select * from `STB6`")
tdSql.checkRows(6)
tdSql.execute("delete from `STB6` where ` ` = 1 and ts = '2022-06-24 11:17:31.000'")
tdSql.checkAffectedRows(1)
tdSql.query("select * from `STB6`")
tdSql.checkRows(5)
tdSql.execute("delete from `STB6` where ` ` = 2")
tdSql.checkAffectedRows(3)
tdSql.query("select * from `STB6`")
tdSql.checkRows(2)
tdSql.error("alter table `STB6` add tag `` nchar(20)")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
from util.log import *
from util.cases import *
from util.sql import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
# table/stable
tdSql.execute("create database test")
tdSql.execute("create database `Test`")
tdSql.execute("use test")
tdSql.execute("create table tb(ts timestamp, c1 int)")
tdSql.query("show tables")
tdSql.checkRows(1)
tdSql.query("show create table tb")
tdSql.checkRows(1)
tdSql.checkData(0, 1, "CREATE TABLE `tb` (`ts` TIMESTAMP,`c1` INT)")
tdSql.error("create table Tb(ts timestamp, c1 int)")
tdSql.execute("create table `TB`(ts timestamp, c1 int)")
tdSql.query("show tables")
tdSql.checkRows(2)
tdSql.query("show create table `TB`")
tdSql.checkRows(1)
tdSql.checkData(0, 1, "CREATE TABLE `TB` (`ts` TIMESTAMP,`c1` INT)")
tdSql.query("describe tb")
tdSql.checkRows(2)
tdSql.query("describe `TB`")
tdSql.checkRows(2)
tdSql.execute("insert into tb values(now, 1)")
tdSql.error("select * from `Test`.tb")
tdSql.query("select * from test.tb")
tdSql.checkRows(1)
tdSql.execute("insert into `TB` values(now, 1)")
tdSql.error("select * from `Test`.`TB`")
tdSql.query("select * from test.`TB`")
tdSql.checkRows(1)
tdSql.execute("create stable stb(ts timestamp, c1 int) tags(t1 int)")
tdSql.query("show stables")
tdSql.checkRows(1)
tdSql.error("crate stable STb(ts timestamp, c1 int) tags(t1 int)")
tdSql.error("create stable `stb`(ts timestamp, c1 int) tags(t1 int)")
tdSql.execute("create stable `STB`(ts timestamp, c1 int) tags(t1 int)")
tdSql.query("show stables")
tdSql.checkRows(2)
tdSql.query("describe stb")
tdSql.checkRows(3)
tdSql.query("describe `STB`")
tdSql.checkRows(3)
tdSql.execute("insert into t1 using stb tags(1) values(now, 1)")
tdSql.query("select * from stb")
tdSql.checkRows(1)
tdSql.execute("insert into t2 using `STB` tags(1) values(now, 1)")
tdSql.query("select * from `STB`")
tdSql.checkRows(1)
tdSql.execute("insert into `T2` using `STB` tags(1) values(now + 1s, 1)")
tdSql.query("select * from `STB`")
tdSql.checkRows(2)
tdSql.query("select tbname from `STB`")
tdSql.checkRows(2)
tdSql.execute("alter table stb add column c2 int")
tdSql.execute("alter table stb add tag t2 int")
tdSql.execute("alter table `STB` add column c2 int")
tdSql.execute("alter table `STB` add tag t2 int")
tdSql.execute("alter table `TB` add column c2 int")
tdSql.query("show create table `STB`")
tdSql.checkData(0, 1, "CREATE TABLE `STB` (`ts` TIMESTAMP,`c1` INT,`c2` INT) TAGS (`t1` INT,`t2` INT)")
# corner cases
tdSql.execute("create table `超级表`(ts timestamp, c1 int) tags(t1 int)")
tdSql.execute("create table `子表一` using `超级表` tags(1)")
tdSql.execute("insert into `子表二` using `超级表` tags(1) values(now, 1)")
tdSql.query("select * from `超级表`")
tdSql.checkRows(1)
tdSql.query("select * from `子表二`")
tdSql.checkRows(1)
tdSql.query("show tables")
tdSql.checkRows(7)
tdSql.execute("create table `普通表` (ts timestamp, c1 int)")
tdSql.execute("insert into `普通表` values(now, 2)")
tdSql.query("select * from `普通表`")
tdSql.checkRows(1)
tdSql.query("show tables")
tdSql.checkRows(8)
tdSql.query("show create table `普通表`")
tdSql.checkData(0, 1, "CREATE TABLE `普通表` (`ts` TIMESTAMP,`c1` INT)")
tdSql.error("create table `` (ts timestamp, c1 int)")
tdSql.execute("create table ` ` (ts timestamp, c1 int)")
tdSql.error("create table ` ` (ts timestamp, c1 int)")
ts = 1656040651000
tdSql.execute("insert into ` ` values(%d, 1)" % ts)
tdSql.query("select * from ` `")
tdSql.checkRows(1)
tdSql.execute("delete from ` `")
tdSql.checkAffectedRows(1)
tdSql.query("select * from ` `")
tdSql.checkRows(0)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
......@@ -109,7 +109,7 @@ sql show mnodes -x step1
print mnode1 $data2_1
print mnode1 $data2_2
print mnode1 $data2_3
if $data2_1 != master then
if $data2_1 != leader then
goto step2
endi
......@@ -186,7 +186,7 @@ sql show mnodes -x step3
print mnode1 $data2_1
print mnode1 $data2_2
print mnode1 $data2_3
if $data2_1 != master then
if $data2_1 != leader then
goto step3
endi
......@@ -323,7 +323,7 @@ sql show mnodes -x step9
print mnode1 $data2_1
print mnode1 $data2_2
print mnode1 $data2_3
if $data2_1 != master then
if $data2_1 != leader then
goto step9
endi
......@@ -419,7 +419,7 @@ sql show mnodes -x step10
print mnode1 $data2_1
print mnode1 $data2_2
print mnode1 $data2_3
if $data2_1 != master then
if $data2_1 != leader then
goto step10
endi
......@@ -482,7 +482,7 @@ sql show mnodes -x step1xx
print mnode1 $data2_1
print mnode1 $data2_2
print mnode1 $data2_3
if $data2_1 != master then
if $data2_1 != leader then
goto step1xx
endi
......
......@@ -62,7 +62,7 @@ sql show mnodes -x step2
print mnode1 $data2_1
print mnode1 $data2_2
print mnode1 $data2_3
if $data2_1 != master then
if $data2_1 != leader then
goto step2
endi
......@@ -115,7 +115,7 @@ sql show mnodes -x step5
print mnode1 $data2_1
print mnode1 $data2_2
print mnode1 $data2_3
if $data2_1 != master then
if $data2_1 != leader then
goto step5
endi
......@@ -185,7 +185,7 @@ sql show mnodes -x step7
print mnode1 $data2_1
print mnode1 $data2_2
print mnode1 $data2_3
if $data2_1 != master then
if $data2_1 != leader then
goto step7
endi
......@@ -240,7 +240,7 @@ sql show mnodes -x step9
print mnode1 $data2_1
print mnode1 $data2_2
print mnode1 $data2_3
if $data2_1 != master then
if $data2_1 != leader then
goto step9
endi
......@@ -314,7 +314,7 @@ sql show mnodes -x step10
print mnode1 $data2_1
print mnode1 $data2_2
print mnode1 $data2_3
if $data2_1 != master then
if $data2_1 != leader then
goto step10
endi
......@@ -369,7 +369,7 @@ sql show mnodes -x step12
print mnode1 $data2_1
print mnode1 $data2_2
print mnode1 $data2_3
if $data2_1 != master then
if $data2_1 != leader then
goto step12
endi
......
......@@ -81,7 +81,7 @@ sql show mnodes -x step3
print mnode1 $data2_1
print mnode1 $data2_2
print mnode1 $data2_3
if $data2_1 != master then
if $data2_1 != leader then
goto step3
endi
......@@ -155,7 +155,7 @@ sql show mnodes -x step5
print mnode1 $data2_1
print mnode1 $data2_2
print mnode1 $data2_3
if $data2_1 != master then
if $data2_1 != leader then
goto step5
endi
......@@ -287,7 +287,7 @@ sql show mnodes -x step9
print mnode1 $data2_1
print mnode1 $data2_2
print mnode1 $data2_3
if $data2_1 != master then
if $data2_1 != leader then
goto step9
endi
......@@ -381,7 +381,7 @@ sql show mnodes -x step10
print mnode1 $data2_1
print mnode1 $data2_2
print mnode1 $data2_3
if $data2_1 != master then
if $data2_1 != leader then
goto step10
endi
......@@ -441,7 +441,7 @@ sql show mnodes -x step12
print mnode1 $data2_1
print mnode1 $data2_2
print mnode1 $data2_3
if $data2_1 != master then
if $data2_1 != leader then
goto step12
endi
......
......@@ -57,7 +57,7 @@ step3:
sql show mnodes
print dnode1 role $data2_1
if $data2_1 != master then
if $data2_1 != leader then
goto step3
endi
......
......@@ -54,13 +54,13 @@ print mnode2Role $mnode2Role
$mnode3Role = $data2_3
print mnode3Role $mnode3Role
if $mnode1Role != master then
if $mnode1Role != leader then
goto show1
endi
if $mnode2Role != slave then
if $mnode2Role != follower then
goto show1
endi
if $mnode3Role != slave then
if $mnode3Role != follower then
goto show1
endi
......
......@@ -54,13 +54,13 @@ print mnode2Role $mnode2Role
$mnode3Role = $data2_3
print mnode3Role $mnode3Role
if $mnode1Role != master then
if $mnode1Role != leader then
goto step1
endi
if $mnode2Role != slave then
if $mnode2Role != follower then
goto step1
endi
if $mnode3Role != slave then
if $mnode3Role != follower then
goto step1
endi
......
......@@ -67,7 +67,7 @@ sql show mnodes
print mnode1 $data2_1
print mnode1 $data2_2
print mnode1 $data2_3
if $data2_1 != master then
if $data2_1 != leader then
goto step1
endi
......
......@@ -60,13 +60,13 @@ sql show mnodes
print mnode1 $data2_1
print mnode1 $data2_2
print mnode1 $data2_3
if $data2_1 != master then
if $data2_1 != leader then
goto step1
endi
if $data2_2 != slave then
if $data2_2 != follower then
goto step1
endi
if $data2_3 != slave then
if $data2_3 != follower then
goto step1
endi
......
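The block above pins each role to a specific mnode: the first must be the leader and the other two followers. When the election outcome is not deterministic, comparing the multiset of roles is more robust; a small sketch under the same cursor assumption (check_mnode_roles is hypothetical):

def check_mnode_roles(cursor):
    cursor.execute("show mnodes")
    roles = sorted(row[2] for row in cursor.fetchall())
    # Exactly one leader and two followers, whichever dnode won the election.
    return roles == ["follower", "follower", "leader"]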
......@@ -76,10 +76,10 @@ endi
if $data06 != 2 then
return -1
endi
if $data05 != master then
if $data05 != leader then
return -1
endi
if $data07 != slave then
if $data07 != follower then
return -1
endi
......@@ -125,10 +125,10 @@ endi
if $data06 != 2 then
goto step4
endi
if $data05 != master then
if $data05 != leader then
goto step4
endi
if $data07 != slave then
if $data07 != follower then
goto step4
endi
......@@ -189,7 +189,7 @@ endi
if $data05 != offline then
goto step5
endi
if $data07 != master then
if $data07 != leader then
goto step5
endi
......@@ -251,10 +251,10 @@ endi
if $data06 != 2 then
goto step6
endi
if $data05 != slave then
if $data05 != follower then
goto step6
endi
if $data07 != master then
if $data07 != leader then
goto step6
endi
......@@ -337,7 +337,7 @@ endi
if $data06 != 2 then
goto step7
endi
if $data05 != master then
if $data05 != leader then
goto step7
endi
if $data07 != offline then
......@@ -422,10 +422,10 @@ endi
if $data06 != 2 then
goto step8
endi
if $data05 != master then
if $data05 != leader then
goto step8
endi
if $data07 != slave then
if $data07 != follower then
goto step8
endi
......@@ -521,7 +521,7 @@ endi
if $data05 != offline then
goto step7
endi
if $data07 != master then
if $data07 != leader then
goto step7
endi
......
......@@ -74,13 +74,13 @@ print mnode2Role $mnode2Role
$mnode3Role = $data2_3
print mnode3Role $mnode3Role
if $mnode1Role != master then
if $mnode1Role != leader then
goto step1
endi
if $mnode2Role != slave then
if $mnode2Role != follower then
goto step1
endi
if $mnode3Role != slave then
if $mnode3Role != follower then
goto step1
endi
......
# Test case description: dnode1 runs only the mnode; dnode2/dnode3 run only vnodes
# step 1: start dnode1
# step 2: start dnode2 and dnode3, and add both into the cluster (suppose dnode2 hosts the master vnode)
# step 2: start dnode2 and dnode3, and add both into the cluster (suppose dnode2 hosts the leader vnode)
# step 3: create db and table, insert data, and flush it to a disk file (control it so only one file is created, e.g. 1841)
# step 4: insert old data (now-20d) and new data (now-40d), keeping the row count small so the rows stay in cache instead of being flushed to disk
# step 5: stop dnode2, so the cached rows are flushed to disk, generating two new files 1840 and 1842 on dnode2
......@@ -145,7 +145,7 @@ if $dnode2Status != ready then
goto wait_dnode3_offline
endi
sleep $sleepTimer # waiting for the master vnode of dnode2 to move to dnode3
sleep $sleepTimer # waiting for the leader vnode of dnode2 to move to dnode3
# check using select
sql select count(*) from $stb
print data00 $data00
......
# Test case description: dnode1 runs only the mnode; dnode2/dnode3 run only vnodes
# step 1: start dnode1
# step 2: start dnode2 and dnode3, and add both into the cluster (suppose dnode2 hosts the master vnode)
# step 2: start dnode2 and dnode3, and add both into the cluster (suppose dnode2 hosts the leader vnode)
# step 3: create db and table, insert data, and flush it to a disk file (control it so only one file is created, e.g. 1841)
# step 4: insert old data (now-20d) and new data (now-40d), keeping the row count small so the rows stay in cache instead of being flushed to disk
# step 5: stop dnode2, so the cached rows are flushed to disk, generating two new files 1840 and 1842 on dnode2
......
......@@ -144,7 +144,7 @@ if $dnode3Vtatus != offline then
sleep 2000
goto wait_dnode3_vgroup_offline
endi
if $dnode2Vtatus != master then
if $dnode2Vtatus != leader then
sleep 2000
goto wait_dnode3_vgroup_offline
endi
......@@ -209,11 +209,11 @@ $dnode2Vtatus = $data7_2
print dnode2Vtatus: $dnode2Vtatus
print dnode3Vtatus: $dnode3Vtatus
if $dnode3Vtatus != slave then
if $dnode3Vtatus != follower then
sleep 2000
goto wait_dnode3_vgroup_slave
endi
if $dnode2Vtatus != master then
if $dnode2Vtatus != leader then
sleep 2000
goto wait_dnode3_vgroup_slave
endi
......@@ -325,11 +325,11 @@ $dnode2Vtatus = $data7_2
print dnode4Vtatus: $dnode4Vtatus
print dnode3Vtatus: $dnode3Vtatus
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode2_vgroup_slave
endi
if $dnode2Vtatus != slave then
if $dnode2Vtatus != follower then
sleep 2000
goto wait_dnode2_vgroup_slave
endi
......
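These vgroup hunks read the two replicas' roles out of fixed columns of the `show vgroups` output ($data5_2 and $data7_2, i.e. row 2, columns 5 and 7). A hedged Python rendering of the same wait loop; the column indexes are copied from the scripts and inherit their fragility.

import time

def wait_vgroup_roles(cursor, want=("leader", "follower"), timeout_s=60):
    # Poll `show vgroups` until row 2 reports the expected replica roles
    # in columns 5 and 7 (mirroring $data5_2 / $data7_2).
    deadline = time.monotonic() + timeout_s
    while time.monotonic() < deadline:
        cursor.execute("show vgroups")
        rows = cursor.fetchall()
        if len(rows) >= 2 and (rows[1][5], rows[1][7]) == want:
            return True
        time.sleep(2)  # the scripts sleep 2000 ms between retries
    return False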
......@@ -146,7 +146,7 @@ if $dnode4Vtatus != offline then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
......@@ -211,11 +211,11 @@ $dnode3Vtatus = $data7_2
print dnode4Vtatus: $dnode4Vtatus
print dnode3Vtatus: $dnode3Vtatus
if $dnode4Vtatus != slave then
if $dnode4Vtatus != follower then
sleep 2000
goto wait_dnode4_vgroup_slave
endi
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode4_vgroup_slave
endi
......@@ -330,11 +330,11 @@ $dnode3Vtatus = $data7_2
print dnode4Vtatus: $dnode4Vtatus
print dnode3Vtatus: $dnode3Vtatus
if $dnode4Vtatus != master then
if $dnode4Vtatus != leader then
sleep 2000
goto wait_dnode4_vgroup_master
endi
if $dnode3Vtatus != slave then
if $dnode3Vtatus != follower then
sleep 2000
goto wait_dnode4_vgroup_master
endi
......@@ -393,11 +393,11 @@ $dnode3Vtatus = $data7_2
print dnode4Vtatus: $dnode4Vtatus
print dnode3Vtatus: $dnode3Vtatus
if $dnode4Vtatus != master then
if $dnode4Vtatus != leader then
sleep 2000
goto wait_dnode4_vgroup_master_2
endi
if $dnode3Vtatus != slave then
if $dnode3Vtatus != follower then
sleep 2000
goto wait_dnode4_vgroup_master_2
endi
......
......@@ -145,7 +145,7 @@ if $dnode4Vtatus != offline then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
......@@ -218,11 +218,11 @@ $dnode3Vtatus = $data7_2
print dnode4Vtatus: $dnode4Vtatus
print dnode3Vtatus: $dnode3Vtatus
if $dnode4Vtatus != slave then
if $dnode4Vtatus != follower then
sleep 2000
goto wait_dnode4_vgroup_slave
endi
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode4_vgroup_slave
endi
......@@ -292,7 +292,7 @@ $dnode3Vtatus = $data7_2
print dnode4Vtatus: $dnode4Vtatus
print dnode3Vtatus: $dnode3Vtatus
if $dnode4Vtatus != master then
if $dnode4Vtatus != leader then
sleep 2000
goto wait_dnode4_vgroup_master
endi
......
......@@ -151,7 +151,7 @@ if $dnode3Vtatus != offline then
sleep 2000
goto wait_dnode3_vgroup_offline
endi
if $dnode2Vtatus != master then
if $dnode2Vtatus != leader then
sleep 2000
goto wait_dnode3_vgroup_offline
endi
......@@ -237,11 +237,11 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $dat
$dnode2Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode2Vtatus != master then
if $dnode2Vtatus != leader then
sleep 2000
goto wait_dnode3_vgroup_slave
endi
if $dnode3Vtatus != slave then
if $dnode3Vtatus != follower then
sleep 2000
goto wait_dnode3_vgroup_slave
endi
......@@ -320,7 +320,7 @@ if $dnode2Vtatus != offline then
sleep 2000
goto wait_dnode3_vgroup_master
endi
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode3_vgroup_master
endi
......
......@@ -203,7 +203,7 @@ if $dnode2Vtatus != offline then
sleep 2000
goto wait_dnode3_vgroup_master
endi
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode3_vgroup_master
endi
......@@ -328,11 +328,11 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $dat
$dnode2Vtatus = $data7_2
$dnode3Vtatus = $data5_2
if $dnode2Vtatus != slave then
if $dnode2Vtatus != follower then
sleep 2000
goto wait_dnode3_vgroup_master_1
endi
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode3_vgroup_master_1
endi
......
......@@ -165,7 +165,7 @@ if $dnode2Vtatus != offline then
sleep 2000
goto wait_dnode3_vgroup_master
endi
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode3_vgroup_master
endi
......@@ -290,11 +290,11 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $dat
$dnode2Vtatus = $data7_2
$dnode3Vtatus = $data5_2
if $dnode2Vtatus != slave then
if $dnode2Vtatus != follower then
sleep 2000
goto wait_dnode3_vgroup_master_1
endi
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode3_vgroup_master_1
endi
......
......@@ -148,7 +148,7 @@ if $dnode4Vtatus != offline then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
......@@ -206,11 +206,11 @@ $dnode3Vtatus = $data7_2
print dnode4Vtatus: $dnode4Vtatus
print dnode3Vtatus: $dnode3Vtatus
if $dnode4Vtatus != slave then
if $dnode4Vtatus != follower then
sleep 2000
goto wait_dnode4_vgroup_slave
endi
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode4_vgroup_slave
endi
......@@ -306,11 +306,11 @@ $dnode3Vtatus = $data7_2
print dnode4Vtatus: $dnode4Vtatus
print dnode3Vtatus: $dnode3Vtatus
if $dnode4Vtatus != master then
if $dnode4Vtatus != leader then
sleep 2000
goto wait_dnode3_vgroup_slave
endi
if $dnode3Vtatus != slave then
if $dnode3Vtatus != follower then
sleep 2000
goto wait_dnode3_vgroup_slave
endi
......@@ -422,11 +422,11 @@ $dnode3Vtatus = $data7_2
print dnode4Vtatus: $dnode4Vtatus
print dnode3Vtatus: $dnode3Vtatus
if $dnode4Vtatus != master then
if $dnode4Vtatus != leader then
sleep 2000
goto wait_dnode2_vgroup_slave
endi
if $dnode3Vtatus != slave then
if $dnode3Vtatus != follower then
sleep 2000
goto wait_dnode2_vgroup_slave
endi
......
......@@ -151,7 +151,7 @@ if $dnode3Vtatus != offline then
sleep 2000
goto wait_dnode3_vgroup_offline
endi
if $dnode2Vtatus != master then
if $dnode2Vtatus != leader then
sleep 2000
goto wait_dnode3_vgroup_offline
endi
......@@ -237,11 +237,11 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $dat
$dnode2Vtatus = $data7_2
$dnode3Vtatus = $data5_2
if $dnode2Vtatus != master then
if $dnode2Vtatus != leader then
sleep 2000
goto wait_dnode3_vgroup_slave
endi
if $dnode3Vtatus != slave then
if $dnode3Vtatus != follower then
sleep 2000
goto wait_dnode3_vgroup_slave
endi
......@@ -320,7 +320,7 @@ if $dnode2Vtatus != offline then
sleep 2000
goto wait_dnode3_vgroup_master
endi
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode3_vgroup_master
endi
......
......@@ -268,7 +268,7 @@ if $dnode3Vtatus != offline then
sleep 2000
goto wait_dnode2_vgroup_master
endi
if $dnode2Vtatus != master then
if $dnode2Vtatus != leader then
sleep 2000
goto wait_dnode2_vgroup_master
endi
......
......@@ -104,7 +104,7 @@ $mnode2Status = $data2_2
$mnode3Status = $data2_3
#$mnode4Status = $data2_4
if $mnode1Status != master then
if $mnode1Status != leader then
return -1
endi
......
# Test case description: dnode1 runs only the mnode; dnode2/dnode3 run only vnodes
# step 1: start dnode1
# step 2: start dnode2 and dnode3, and add both into the cluster (suppose dnode2 hosts the master vnode)
# step 2: start dnode2 and dnode3, and add both into the cluster (suppose dnode2 hosts the leader vnode)
# step 3: create db and table, insert data, and flush it to a disk file (control it so only one file is created, e.g. 1841)
# step 4: insert old data (now-15d) and new data (now+15d), keeping the row count small so the rows stay in cache instead of being flushed to disk
# step 5: stop dnode2, so the cached rows are flushed to disk, generating two new files 1840 and 1842 on dnode2
......@@ -157,7 +157,7 @@ if $dnode3Status != ready then
goto wait_dnode2_offline
endi
sleep $sleepTimer # waiting for the master vnode of dnode2 to move to dnode3
sleep $sleepTimer # waiting for the leader vnode of dnode2 to move to dnode3
# check using select
sql select count(*) from $stb
print data00 $data00
......
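Because servers from before this rename still answer `master`/`slave`, a test that must pass against both vocabularies can normalize the role string before asserting; a small hypothetical shim:

ROLE_ALIASES = {"master": "leader", "slave": "follower"}

def normalize_role(role):
    # Map pre-rename role names onto the new vocabulary; new names pass through.
    return ROLE_ALIASES.get(role, role)

assert normalize_role("master") == "leader"
assert normalize_role("follower") == "follower"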
......@@ -146,7 +146,7 @@ if $dnode4Vtatus != offline then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
......
......@@ -150,7 +150,7 @@ if $dnode4Vtatus != offline then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
......
......@@ -150,7 +150,7 @@ if $dnode4Vtatus != offline then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
......
......@@ -150,7 +150,7 @@ if $dnode4Vtatus != offline then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
......
......@@ -150,7 +150,7 @@ if $dnode4Vtatus != offline then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
......
......@@ -150,7 +150,7 @@ if $dnode4Vtatus != offline then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
......
......@@ -150,7 +150,7 @@ if $dnode4Vtatus != offline then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
......
......@@ -151,7 +151,7 @@ if $dnode4Vtatus != offline then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
......
......@@ -150,7 +150,7 @@ if $dnode4Vtatus != offline then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
......
......@@ -150,7 +150,7 @@ if $dnode4Vtatus != offline then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
......
......@@ -224,7 +224,7 @@ if $data2_1 != offline then
sleep 2000
goto wait_dnode2_master
endi
if $data2_2 != master then
if $data2_2 != leader then
sleep 2000
goto wait_dnode2_master
endi
......
......@@ -150,7 +150,7 @@ if $dnode4Vtatus != offline then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
......
......@@ -150,7 +150,7 @@ if $dnode4Vtatus != offline then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
......
......@@ -150,7 +150,7 @@ if $dnode4Vtatus != offline then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
......
......@@ -150,7 +150,7 @@ if $dnode4Vtatus != offline then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
......
......@@ -148,7 +148,7 @@ if $dnode4Vtatus != offline then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
......
......@@ -150,7 +150,7 @@ if $dnode4Vtatus != offline then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
......
......@@ -150,7 +150,7 @@ if $dnode4Vtatus != offline then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
......
......@@ -146,7 +146,7 @@ if $dnode4Vtatus != offline then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
......@@ -210,7 +210,7 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $dat
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != slave then
if $dnode4Vtatus != follower then
sleep 2000
goto wait_dnode4_vgroup_slave
endi
......@@ -243,7 +243,7 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $dat
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != master then
if $dnode4Vtatus != leader then
sleep 2000
goto wait_dnode4_vgroup_master
endi
......@@ -317,7 +317,7 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $dat
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != slave then
if $dnode4Vtatus != follower then
sleep 2000
goto wait_dnode4_vgroup_slave_2
endi
......@@ -350,7 +350,7 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $dat
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != master then
if $dnode4Vtatus != leader then
sleep 2000
goto wait_dnode4_vgroup_master_2
endi
......@@ -440,7 +440,7 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $dat
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != slave then
if $dnode4Vtatus != follower then
sleep 2000
goto wait_dnode4_vgroup_slave_3
endi
......@@ -473,7 +473,7 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $dat
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != master then
if $dnode4Vtatus != leader then
sleep 2000
goto wait_dnode4_vgroup_master_3
endi
......
......@@ -150,7 +150,7 @@ if $dnode4Vtatus != offline then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
......
......@@ -150,7 +150,7 @@ if $dnode4Vtatus != offline then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
if $dnode3Vtatus != master then
if $dnode3Vtatus != leader then
sleep 2000
goto wait_dnode4_vgroup_offline
endi
......
......@@ -192,10 +192,10 @@ print dnode1 ==> $dnode1Role
print dnode3 ==> $dnode3Role
print dnode4 ==> $dnode4Role
if $dnode1Role != master then
if $dnode1Role != leader then
return -1
endi
if $dnode3Role != slave then
if $dnode3Role != follower then
return -1
endi
......@@ -236,7 +236,7 @@ print dnode1 ==> $dnode1Role
print dnode3 ==> $dnode3Role
print dnode4 ==> $dnode4Role
if $dnode1Role != master then
if $dnode1Role != leader then
return -1
endi
......@@ -274,11 +274,11 @@ $dnode4Role = $data2_4
print dnode1 ==> $dnode1Role
print dnode4 ==> $dnode4Role
if $dnode1Role != master then
if $dnode1Role != leader then
return -1
endi
if $dnode4Role != slave then
if $dnode4Role != follower then
return -1
endi
......
4 more file diffs are collapsed in the web view and not shown here.